source
stringlengths
3
92
c
stringlengths
26
2.25M
flowinfo_metadata.c
/* * Copyright 2014-2016 Nippon Telegraph and Telephone Corporation. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @file flowinfo_metadata.c * @brief Optimized flow database for dataplane, for metadata */ #include <stdlib.h> #include "openflow.h" #include "lagopus/flowdb.h" #include "pktbuf.h" #include "packet.h" #include "lagopus/flowinfo.h" #define OXM_FIELD_TYPE(field) ((field) >> 1) #define METADATA_BITLEN (64) static lagopus_result_t add_flow_metadata_mask(struct flowinfo *, struct flow *); static lagopus_result_t del_flow_metadata_mask(struct flowinfo *, struct flow *); static struct flow * match_flow_metadata_mask(struct flowinfo *, struct lagopus_packet *, int32_t *); static struct flow * find_flow_metadata_mask(struct flowinfo *, struct flow *); static void destroy_flowinfo_metadata_mask(struct flowinfo *); static lagopus_result_t add_flow_metadata(struct flowinfo *, struct flow *); static lagopus_result_t del_flow_metadata(struct flowinfo *, struct flow *); static struct flow * match_flow_metadata(struct flowinfo *, struct lagopus_packet *, int32_t *); static struct flow * find_flow_metadata(struct flowinfo *, struct flow *); static void destroy_flowinfo_metadata(struct flowinfo *); static lagopus_result_t get_match_metadata(const struct match_list *match_list, uint64_t *metadata, uint64_t *mask) { const struct match *match; TAILQ_FOREACH(match, match_list, entry) { if (match->oxm_field == (OFPXMT_OFB_METADATA << 1) + 1) { OS_MEMCPY(metadata, match->oxm_value, 
sizeof(*metadata)); OS_MEMCPY(mask, &match->oxm_value[8], sizeof(*mask)); break; } if (OXM_FIELD_TYPE(match->oxm_field) == OFPXMT_OFB_METADATA) { OS_MEMCPY(metadata, match->oxm_value, sizeof(*metadata)); *mask = 0xffffffffffffffff; break; } } if (match == NULL) { return LAGOPUS_RESULT_NOT_FOUND; } return LAGOPUS_RESULT_OK; } struct flowinfo * new_flowinfo_metadata_mask(void) { struct flowinfo *self; self = calloc(1, sizeof(struct flowinfo)); if (self != NULL) { self->nflow = 0; self->nnext = 0; self->next = malloc(1); self->misc = new_flowinfo_eth_type(); self->add_func = add_flow_metadata_mask; self->del_func = del_flow_metadata_mask; self->match_func = match_flow_metadata_mask; self->find_func = find_flow_metadata_mask; self->destroy_func = destroy_flowinfo_metadata_mask; } return self; } static void destroy_flowinfo_metadata_mask(struct flowinfo *self) { struct flowinfo *flowinfo; unsigned int i; for (i = 0; i < self->nnext; i++) { flowinfo = self->next[i]; flowinfo->destroy_func(flowinfo); } free(self->next); free(self); } static void freeup_flowinfo(void *val) { struct flowinfo *flowinfo; flowinfo = val; flowinfo->destroy_func(flowinfo); } struct flowinfo * new_flowinfo_metadata(void) { struct flowinfo *self; self = calloc(1, sizeof(struct flowinfo)); if (self != NULL) { lagopus_hashmap_create(&self->hashmap, LAGOPUS_HASHMAP_TYPE_ONE_WORD, freeup_flowinfo); /* misc is not used */ self->add_func = add_flow_metadata; self->del_func = del_flow_metadata; self->match_func = match_flow_metadata; self->find_func = find_flow_metadata; self->destroy_func = destroy_flowinfo_metadata; } return self; } static void destroy_flowinfo_metadata(struct flowinfo *self) { lagopus_hashmap_destroy(&self->hashmap, true); free(self); } static lagopus_result_t add_flow_metadata_mask(struct flowinfo *self, struct flow *flow) { struct flowinfo *flowinfo; uint64_t metadata, mask; lagopus_result_t rv; unsigned int i; rv = get_match_metadata(&flow->match_list, &metadata, &mask); if (rv == 
LAGOPUS_RESULT_OK) { rv = LAGOPUS_RESULT_NOT_FOUND; for (i = 0; i < self->nnext; i++) { if (self->next[i]->userdata == mask) { flowinfo = self->next[i]; rv = LAGOPUS_RESULT_OK; break; } } if (rv == LAGOPUS_RESULT_NOT_FOUND) { /* new node. */ flowinfo = new_flowinfo_metadata(); flowinfo->userdata = mask; self->next = realloc(self->next, (unsigned long)(self->nnext + 1) * sizeof(struct flowinfo *)); self->next[self->nnext] = flowinfo; self->nnext++; } rv = flowinfo->add_func(flowinfo, flow); } else { rv = self->misc->add_func(self->misc, flow); } if (rv == LAGOPUS_RESULT_OK) { self->nflow++; } return rv; } static lagopus_result_t del_flow_metadata_mask(struct flowinfo *self, struct flow *flow) { struct flowinfo *flowinfo; uint64_t metadata, mask; lagopus_result_t rv; unsigned int i; rv = get_match_metadata(&flow->match_list, &metadata, &mask); if (rv == LAGOPUS_RESULT_OK) { rv = LAGOPUS_RESULT_NOT_FOUND; for (i = 0; i < self->nnext; i++) { if (self->next[i]->userdata == mask) { flowinfo = self->next[i]; rv = LAGOPUS_RESULT_OK; break; } } if (rv == LAGOPUS_RESULT_NOT_FOUND) { return LAGOPUS_RESULT_NOT_FOUND; } rv = flowinfo->del_func(flowinfo, flow); if (flowinfo->nflow == 0) { flowinfo->destroy_func(flowinfo); self->nnext--; memmove(&self->next[i], &self->next[i + 1], (size_t)(self->nnext - i)); } } else { rv = self->misc->del_func(self->misc, flow); } if (rv == LAGOPUS_RESULT_OK) { self->nflow--; } return rv; } static struct flow * match_flow_metadata_mask(struct flowinfo *self, struct lagopus_packet *pkt, int32_t *pri) { struct flowinfo *flowinfo; struct flow *flow[self->nnext], *matched, *alt_flow; struct flow mismatched = { .priority = 0, .flags = 0, .idle_timeout = 0, .hard_timeout = 0, .match_list = {NULL, NULL}, .instruction_list = {NULL, NULL}, .field_bits = 0 }; unsigned int i; matched = &mismatched; //#pragma omp parallel for for (i = 0; i < self->nnext; i++) { flowinfo = self->next[i]; flow[i] = flowinfo->match_func(flowinfo, pkt, pri); } for (i = 0; i < 
self->nnext; i++) { if (flow[i] != NULL && flow[i]->priority > matched->priority) { matched = flow[i]; } } alt_flow = self->misc->match_func(self->misc, pkt, pri); if (alt_flow != NULL) { matched = alt_flow; } if (matched == &mismatched) { matched = NULL; } return matched; } static struct flow * find_flow_metadata_mask(struct flowinfo *self, struct flow *flow) { struct flowinfo *flowinfo; uint64_t metadata, mask; lagopus_result_t rv; unsigned int i; rv = get_match_metadata(&flow->match_list, &metadata, &mask); if (rv == LAGOPUS_RESULT_OK) { rv = LAGOPUS_RESULT_NOT_FOUND; for (i = 0; i < self->nnext; i++) { if (self->next[i]->userdata == mask) { flowinfo = self->next[i]; rv = LAGOPUS_RESULT_OK; break; } } if (rv == LAGOPUS_RESULT_NOT_FOUND) { return NULL; } } else { flowinfo = self->misc; } return flowinfo->find_func(flowinfo, flow); } static lagopus_result_t add_flow_metadata(struct flowinfo *self, struct flow *flow) { struct flowinfo *flowinfo; uint64_t metadata, mask; lagopus_result_t rv; rv = get_match_metadata(&flow->match_list, &metadata, &mask); if (rv == LAGOPUS_RESULT_OK) { rv = lagopus_hashmap_find_no_lock(&self->hashmap, (void *)metadata, (void *)&flowinfo); if (rv != LAGOPUS_RESULT_OK) { void *val; flowinfo = new_flowinfo_eth_type(); val = flowinfo; rv = lagopus_hashmap_add_no_lock(&self->hashmap, (void *)metadata, (void *)&val, false); if (rv != LAGOPUS_RESULT_OK) { goto out; } } rv = flowinfo->add_func(flowinfo, flow); if (rv == LAGOPUS_RESULT_OK) { self->nflow++; } } out: return rv; } static lagopus_result_t del_flow_metadata(struct flowinfo *self, struct flow *flow) { uint64_t metadata, mask; lagopus_result_t rv; rv = get_match_metadata(&flow->match_list, &metadata, &mask); if (rv == LAGOPUS_RESULT_OK) { struct flowinfo *flowinfo; rv = lagopus_hashmap_find_no_lock(&self->hashmap, (void *)metadata, (void *)&flowinfo); if (rv == LAGOPUS_RESULT_OK) { rv = flowinfo->del_func(flowinfo, flow); } if (rv == LAGOPUS_RESULT_OK) { self->nflow--; } } return rv; 
} static struct flow * match_flow_metadata(struct flowinfo *self, struct lagopus_packet *pkt, int32_t *pri) { struct flowinfo *flowinfo; uint64_t metadata; struct flow *flow; lagopus_result_t rv; flow = NULL; metadata = (pkt->oob_data.metadata & self->userdata); rv = lagopus_hashmap_find_no_lock(&self->hashmap, (void *)metadata, (void *)&flowinfo); if (rv == LAGOPUS_RESULT_OK) { flow = flowinfo->match_func(flowinfo, pkt, pri); } return flow; } static struct flow * find_flow_metadata(struct flowinfo *self, struct flow *flow) { struct flowinfo *flowinfo; uint64_t metadata, mask; lagopus_result_t rv; rv = get_match_metadata(&flow->match_list, &metadata, &mask); if (rv == LAGOPUS_RESULT_OK) { rv = lagopus_hashmap_find_no_lock(&self->hashmap, (void *)metadata, (void *)&flowinfo); if (rv != LAGOPUS_RESULT_OK) { return NULL; } } else { flowinfo = self->misc; } return flowinfo->find_func(flowinfo, flow); }
GB_dense_subassign_05d_template.c
//------------------------------------------------------------------------------
// GB_dense_subassign_05d_template: C<M> = x where C is dense
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Template body: GB_CTYPE, GB_COPY_SCALAR_TO_C, cwork, C, M, Mask_struct,
// nthreads, ntasks, and the *_slice arrays are all supplied by the
// including translation unit.

{

    //--------------------------------------------------------------------------
    // get C and M
    //--------------------------------------------------------------------------

    // M may be jumbled (rows within a vector unsorted); order is irrelevant
    // here since each mask entry is handled independently.
    ASSERT (GB_JUMBLED_OK (M)) ;
    const int64_t *GB_RESTRICT Mp = M->p ;
    const int8_t  *GB_RESTRICT Mb = M->b ;      // bitmap part; NULL unless M is bitmap
    const int64_t *GB_RESTRICT Mh = M->h ;      // hyperlist; NULL unless M is hypersparse
    const int64_t *GB_RESTRICT Mi = M->i ;
    // Mx is NULL for a structural mask: every entry of M counts as true.
    const GB_void *GB_RESTRICT Mx = (GB_void *) (Mask_struct ? NULL : (M->x)) ;
    const size_t msize = M->type->size ;
    const size_t mvlen = M->vlen ;

    GB_CTYPE *GB_RESTRICT Cx = (GB_CTYPE *) C->x ;
    const int64_t cvlen = C->vlen ;

    //--------------------------------------------------------------------------
    // C<M> = x
    //--------------------------------------------------------------------------

    int taskid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        // if kfirst > klast then taskid does no work at all
        int64_t kfirst = kfirst_slice [taskid] ;
        int64_t klast  = klast_slice  [taskid] ;

        //----------------------------------------------------------------------
        // C<M(:,kfirst:klast)> = x
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // find the part of M(:,k) to be operated on by this task
            //------------------------------------------------------------------

            int64_t j = GBH (Mh, k) ;
            int64_t pM_start, pM_end ;
            // clamp [Mp[k], Mp[k+1]) to this task's slice of the first/last
            // vectors, which may be shared with neighboring tasks.
            GB_get_pA (&pM_start, &pM_end, taskid, k,
                kfirst, klast, pstart_slice, Mp, mvlen) ;

            // pC points to the start of C(:,j) if C is dense
            int64_t pC = j * cvlen ;

            //------------------------------------------------------------------
            // C<M(:,j)> = x
            //------------------------------------------------------------------

            if (Mx == NULL && Mb == NULL)
            {
                // structural, non-bitmap mask: every listed entry is true,
                // so the inner loop has no conditional and can vectorize.
                GB_PRAGMA_SIMD_VECTORIZE
                for (int64_t pM = pM_start ; pM < pM_end ; pM++)
                {
                    int64_t p = pC + GBI (Mi, pM, mvlen) ;
                    GB_COPY_SCALAR_TO_C (p, cwork) ;    // Cx [p] = scalar
                }
            }
            else
            {
                // valued and/or bitmap mask: test each entry before writing.
                GB_PRAGMA_SIMD_VECTORIZE
                for (int64_t pM = pM_start ; pM < pM_end ; pM++)
                {
                    if (GBB (Mb, pM) && GB_mcast (Mx, pM, msize))
                    {
                        int64_t p = pC + GBI (Mi, pM, mvlen) ;
                        GB_COPY_SCALAR_TO_C (p, cwork) ;    // Cx [p] = scalar
                    }
                }
            }
        }
    }
}
omp_loop.c
#include <omp.h>
#include <stdio.h>

#define N 100000

/*
 * Minimal OpenMP vector-add benchmark: time c[i] = a[i] + b[i] over N
 * elements and print wall-clock start/end/diff via omp_get_wtime().
 *
 * Fixes over the original:
 *  - the result array was never read, so an optimizing compiler could
 *    legally delete the entire timed loop; a checksum is now printed.
 *  - arrays are static (3 x 400 KB no longer lives on the stack).
 *  - unused locals (tid, NTHR) removed.
 */
int main(void) {
  /* static: keep ~1.2 MB of float data off the stack. */
  static float a[N], b[N], c[N];
  int i;

  /* Some initializations */
  for (i = 0; i < N; i++) {
    a[i] = b[i] = i * 1.0f;
  }

  double start = omp_get_wtime();

  //omp_set_dynamic(0);
  //omp_set_num_threads(4);

  #pragma omp parallel for schedule(static) shared(a, b, c) private(i)
  for (i = 0; i < N; i++) {
    c[i] = a[i] + b[i];
  }
  /* end of parallel section */

  double end = omp_get_wtime();

  /* Consume the result so the timed work cannot be optimized away. */
  double checksum = 0.0;
  for (i = 0; i < N; i++) {
    checksum += c[i];
  }

  printf("start time = %f\n", start);
  printf("end time = %f\n", end);
  printf("diff time = %f\n", end - start);
  printf("checksum = %f\n", checksum);
  return 0;
}
utils.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015 by Contributors * \file utils.h * \brief Basic utilility functions. */ #ifndef MXNET_COMMON_UTILS_H_ #define MXNET_COMMON_UTILS_H_ #include <dmlc/logging.h> #include <dmlc/omp.h> #include <nnvm/graph.h> #include <mxnet/engine.h> #include <mxnet/ndarray.h> #include <mxnet/op_attr_types.h> #include <mxnet/graph_attr_types.h> #include <nnvm/graph_attr_types.h> #include <memory> #include <vector> #include <type_traits> #include <utility> #include <random> #include <string> #include <thread> #include <algorithm> #include <functional> #include <limits> #include "../operator/mxnet_op.h" namespace mxnet { namespace common { /*! * \brief IndPtr should be non-negative, in non-decreasing order, start with 0 * and end with value equal with size of indices. */ struct csr_indptr_check { template<typename DType, typename IType> MSHADOW_XINLINE static void Map(int i, DType* out, const IType* indptr, const nnvm::dim_t end, const nnvm::dim_t idx_size) { if (indptr[i+1] < 0 || indptr[i+1] < indptr[i] || (i == 0 && indptr[i] != 0) || (i == end - 1 && indptr[end] != idx_size)) *out = kCSRIndPtrErr; } }; /*! 
* \brief Indices should be non-negative, less than the number of columns * and in ascending order per row. */ struct csr_idx_check { template<typename DType, typename IType, typename RType> MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx, const RType* indptr, const nnvm::dim_t ncols) { for (RType j = indptr[i]; j < indptr[i+1]; j++) { if (idx[j] >= ncols || idx[j] < 0 || (j < indptr[i+1] - 1 && idx[j] >= idx[j+1])) { *out = kCSRIdxErr; break; } } } }; /*! * \brief Indices of RSPNDArray should be non-negative, * less than the size of first dimension and in ascending order */ struct rsp_idx_check { template<typename DType, typename IType> MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx, const nnvm::dim_t end, const nnvm::dim_t nrows) { if ((i < end && idx[i+1] <= idx[i]) || idx[i] < 0 || idx[i] >= nrows) *out = kRSPIdxErr; } }; template<typename xpu> void CheckFormatWrapper(const RunContext &rctx, const NDArray &input, const TBlob &err_cpu, const bool full_check); /*! * \brief Check the validity of CSRNDArray. * \param rctx Execution context. * \param input Input NDArray of CSRStorage. * \param err_cpu Error number on cpu. * \param full_check If true, rigorous check, O(N) operations, * otherwise basic check, O(1) operations. 
*/ template<typename xpu> void CheckFormatCSRImpl(const RunContext &rctx, const NDArray &input, const TBlob &err_cpu, const bool full_check) { using namespace op::mxnet_op; CHECK_EQ(input.storage_type(), kCSRStorage) << "CheckFormatCSRImpl is for CSRNDArray"; const TShape shape = input.shape(); const TShape idx_shape = input.aux_shape(csr::kIdx); const TShape indptr_shape = input.aux_shape(csr::kIndPtr); const TShape storage_shape = input.storage_shape(); if ((shape.ndim() != 2) || (idx_shape.ndim() != 1 || indptr_shape.ndim() != 1 || storage_shape.ndim() != 1) || (indptr_shape[0] != shape[0] + 1) || (idx_shape[0] != storage_shape[0])) { MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, { DType* err = err_cpu.dptr<DType>(); *err = kCSRShapeErr; }); return; } if (full_check) { MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, { MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIndPtr), RType, { MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIdx), IType, { mshadow::Stream<xpu> *s = rctx.get_stream<xpu>(); NDArray ret_xpu = NDArray(mshadow::Shape1(1), rctx.get_ctx(), false, err_cpu.type_flag_); TBlob val_xpu = ret_xpu.data(); Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>()); Kernel<csr_indptr_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(), input.aux_data(csr::kIndPtr).dptr<RType>(), indptr_shape[0] - 1, idx_shape[0]); // no need to check indices if indices are empty if (idx_shape[0] != 0) { Kernel<csr_idx_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(), input.aux_data(csr::kIdx).dptr<IType>(), input.aux_data(csr::kIndPtr).dptr<RType>(), shape[1]); } mshadow::Copy(err_cpu.get<cpu, 1, DType>(), val_xpu.get<xpu, 1, DType>(s), s); }); }); }); } } /*! * \brief Check the validity of RowSparseNDArray. * \param rctx Execution context. * \param input Input NDArray of RowSparseStorage. * \param err_cpu Error number on cpu. 
* \param full_check If true, rigorous check, O(N) operations, * otherwise basic check, O(1) operations. */ template<typename xpu> void CheckFormatRSPImpl(const RunContext &rctx, const NDArray &input, const TBlob &err_cpu, const bool full_check) { using namespace op::mxnet_op; CHECK_EQ(input.storage_type(), kRowSparseStorage) << "CheckFormatRSPImpl is for RSPNDArray"; const TShape idx_shape = input.aux_shape(rowsparse::kIdx); if (idx_shape[0] != input.storage_shape()[0]) { MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, { DType* err = err_cpu.dptr<DType>(); *err = kRSPShapeErr; }); return; } if (idx_shape[0] == 0) { return; } if (full_check) { MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, { MSHADOW_IDX_TYPE_SWITCH(input.aux_type(rowsparse::kIdx), IType, { mshadow::Stream<xpu> *s = rctx.get_stream<xpu>(); NDArray ret_xpu = NDArray(mshadow::Shape1(1), rctx.get_ctx(), false, err_cpu.type_flag_); TBlob val_xpu = ret_xpu.data(); Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>()); Kernel<rsp_idx_check, xpu>::Launch(s, idx_shape[0], val_xpu.dptr<DType>(), input.aux_data(rowsparse::kIdx).dptr<IType>(), idx_shape[0] - 1, input.shape()[0]); mshadow::Copy(err_cpu.get<cpu, 1, DType>(), val_xpu.get<xpu, 1, DType>(s), s); }); }); } } template<typename xpu> void CheckFormatImpl(const RunContext &rctx, const NDArray &input, const TBlob &err_cpu, const bool full_check) { int stype = input.storage_type(); if (stype == kCSRStorage) { CheckFormatCSRImpl<xpu>(rctx, input, err_cpu, full_check); } else if (stype == kRowSparseStorage) { CheckFormatRSPImpl<xpu>(rctx, input, err_cpu, full_check); } else if (stype == kDefaultStorage) { // no-op for default storage } else { LOG(FATAL) << "Unknown storage type " << stype; } } /*! \brief Pick rows specified by user input index array from a row sparse ndarray * and save them in the output sparse ndarray. 
*/ template<typename xpu> void SparseRetainOpForwardRspWrapper(mshadow::Stream<xpu> *s, const NDArray& input_nd, const TBlob& idx_data, const OpReqType req, NDArray* output_nd); /* \brief Casts tensor storage type to the new type. */ template<typename xpu> void CastStorageDispatch(const OpContext& ctx, const NDArray& input, const NDArray& output); /*! \brief returns true if all storage types in `vstorage` are the same as target `stype`. * false is returned for empty inputs. */ inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage, const NDArrayStorageType stype) { if (!vstorage.empty()) { for (const auto& i : vstorage) { if (i != stype) return false; } return true; } return false; } /*! \brief returns true if all storage types in `vstorage` are the same as target `stype1` * or `stype2'. Sets boolean if both found. * false is returned for empty inputs. */ inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage, const NDArrayStorageType stype1, const NDArrayStorageType stype2, bool *has_both) { if (has_both) { *has_both = false; } if (!vstorage.empty()) { uint8_t has = 0; for (const auto i : vstorage) { if (i == stype1) { has |= 1; } else if (i == stype2) { has |= 2; } else { return false; } } if (has_both) { *has_both = has == 3; } return true; } return false; } /*! \brief returns true if the storage types of arrays in `ndarrays` * are the same as target `stype`. false is returned for empty inputs. */ inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays, const NDArrayStorageType stype) { if (!ndarrays.empty()) { for (const auto& nd : ndarrays) { if (nd.storage_type() != stype) { return false; } } return true; } return false; } /*! \brief returns true if the storage types of arrays in `ndarrays` * are the same as targets `stype1` or `stype2`. false is returned for empty inputs. 
*/ inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays, const NDArrayStorageType stype1, const NDArrayStorageType stype2, bool *has_both) { if (has_both) { *has_both = false; } if (!ndarrays.empty()) { uint8_t has = 0; for (const auto& nd : ndarrays) { const NDArrayStorageType stype = nd.storage_type(); if (stype == stype1) { has |= 1; } else if (stype == stype2) { has |= 2; } else { return false; } } if (has_both) { *has_both = has == 3; } return true; } return false; } /*! \brief get string representation of dispatch_mode */ inline std::string dispatch_mode_string(const DispatchMode x) { switch (x) { case DispatchMode::kFCompute: return "fcompute"; case DispatchMode::kFComputeEx: return "fcompute_ex"; case DispatchMode::kFComputeFallback: return "fcompute_fallback"; case DispatchMode::kVariable: return "variable"; case DispatchMode::kUndefined: return "undefined"; } return "unknown"; } /*! \brief get string representation of storage_type */ inline std::string stype_string(const int x) { switch (x) { case kDefaultStorage: return "default"; case kCSRStorage: return "csr"; case kRowSparseStorage: return "row_sparse"; } return "unknown"; } /*! \brief get string representation of device type */ inline std::string dev_type_string(const int dev_type) { switch (dev_type) { case Context::kCPU: return "cpu"; case Context::kGPU: return "gpu"; case Context::kCPUPinned: return "cpu_pinned"; case Context::kCPUShared: return "cpu_shared"; } return "unknown"; } /*! 
\brief get string representation of the operator stypes */ inline std::string operator_stype_string(const nnvm::NodeAttrs& attrs, const int dev_mask, const std::vector<int>& in_attrs, const std::vector<int>& out_attrs) { std::ostringstream os; os << "operator = " << attrs.op->name << "\ninput storage types = ["; for (const int attr : in_attrs) { os << stype_string(attr) << ", "; } os << "]\n" << "output storage types = ["; for (const int attr : out_attrs) { os << stype_string(attr) << ", "; } os << "]\n" << "params = {"; for (auto kv : attrs.dict) { os << "\"" << kv.first << "\" : " << kv.second << ", "; } os << "}\n" << "context.dev_mask = " << dev_type_string(dev_mask); return os.str(); } /*! \brief get string representation of the operator */ inline std::string operator_string(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<NDArray>& inputs, const std::vector<OpReqType>& req, const std::vector<NDArray>& outputs) { std::string result = ""; std::vector<int> in_stypes; std::vector<int> out_stypes; in_stypes.reserve(inputs.size()); out_stypes.reserve(outputs.size()); auto xform = [](const NDArray arr) -> int { return arr.storage_type(); }; std::transform(inputs.begin(), inputs.end(), std::back_inserter(in_stypes), xform); std::transform(outputs.begin(), outputs.end(), std::back_inserter(out_stypes), xform); result += operator_stype_string(attrs, ctx.run_ctx.ctx.dev_mask(), in_stypes, out_stypes); return result; } /*! \brief log message once. Intended for storage fallback warning messages. */ inline void LogOnce(const std::string& message) { typedef dmlc::ThreadLocalStore<std::unordered_set<std::string>> LogStore; auto log_store = LogStore::Get(); if (log_store->find(message) == log_store->end()) { LOG(INFO) << message; log_store->insert(message); } } /*! 
\brief log storage fallback event */ inline void LogStorageFallback(const nnvm::NodeAttrs& attrs, const int dev_mask, const std::vector<int>* in_attrs, const std::vector<int>* out_attrs) { static bool log = dmlc::GetEnv("MXNET_STORAGE_FALLBACK_LOG_VERBOSE", true); if (!log) return; const std::string op_str = operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs); std::ostringstream os; const char* warning = "\nThe operator with default storage type will be dispatched " "for execution. You're seeing this warning message because the operator above is unable " "to process the given ndarrays with specified storage types, context and parameter. " "Temporary dense ndarrays are generated in order to execute the operator. " "This does not affect the correctness of the programme. " "You can set environment variable MXNET_STORAGE_FALLBACK_LOG_VERBOSE to " "0 to suppress this warning."; os << "\nStorage type fallback detected:\n" << op_str << warning; LogOnce(os.str()); } // heuristic to dermine number of threads per GPU inline int GetNumThreadsPerGPU() { // This is resource efficient option. return dmlc::GetEnv("MXNET_GPU_WORKER_NTHREADS", 2); } // heuristic to get number of matching colors. // this decides how much parallelism we can get in each GPU. inline int GetExecNumMatchColor() { // This is resource efficient option. int num_match_color = dmlc::GetEnv("MXNET_EXEC_NUM_TEMP", 1); return std::min(num_match_color, GetNumThreadsPerGPU()); } template<typename T, typename V> V ParallelAccumulate(const T* a, const int n, V start) { V sum = start; #pragma omp parallel for reduction(+:sum) for (int i = 0; i < n; ++i) { sum += a[i]; } return sum; } /*! * \brief * Helper function for ParallelSort. * DO NOT call this function directly. * Use the interface ParallelSort instead. 
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h */ template<typename RandomIt, typename Compare> void ParallelSortHelper(RandomIt first, size_t len, size_t grainsize, const Compare& comp) { if (len < grainsize) { std::sort(first, first+len, comp); } else { std::thread thr(ParallelSortHelper<RandomIt, Compare>, first, len/2, grainsize, comp); ParallelSortHelper(first+len/2, len - len/2, grainsize, comp); thr.join(); std::inplace_merge(first, first+len/2, first+len, comp); } } /*! * \brief * Sort the elements in the range [first, last) into the ascending order defined by * the comparator comp. * If the length of the range [first, last) is greater than a certain threshold, * the range will be recursively divided into two and assign two threads * to sort each half range. * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h */ template<typename RandomIt, typename Compare> void ParallelSort(RandomIt first, RandomIt last, size_t num_threads, Compare comp) { const auto num = std::distance(first, last); size_t grainsize = std::max(num / num_threads + 5, static_cast<size_t>(1024*16)); ParallelSortHelper(first, num, grainsize, comp); } /*! * \brief * Sort the elements in the range [first, last) into ascending order. * The elements are compared using the default < operator. * If the length of the range [first, last) is greater than a certain threshold, * the range will be recursively divided into two and assign two threads * to sort each half range. * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h */ template<typename RandomIt> void ParallelSort(RandomIt first, RandomIt last, size_t num_threads) { ParallelSort(first, last, num_threads, std::less<typename std::iterator_traits<RandomIt>::value_type>()); } /*! * \brief Random Engine */ typedef std::mt19937 RANDOM_ENGINE; /*! * \brief Helper functions. */ namespace helper { /*! * \brief Helper for non-array type `T`. 
*/ template <class T> struct UniqueIf { /*! * \brief Type of `T`. */ using SingleObject = std::unique_ptr<T>; }; /*! * \brief Helper for an array of unknown bound `T`. */ template <class T> struct UniqueIf<T[]> { /*! * \brief Type of `T`. */ using UnknownBound = std::unique_ptr<T[]>; }; /*! * \brief Helper for an array of known bound `T`. */ template <class T, size_t kSize> struct UniqueIf<T[kSize]> { /*! * \brief Type of `T`. */ using KnownBound = void; }; } // namespace helper /*! * \brief Constructs an object of type `T` and wraps it in a * `std``::``unique_ptr`. * \param args List of arguments with which an instance of `T` will be * constructed. * \return `std``::``unique_ptr` of an instance of type `T`. * * Constructs a non-array type `T`. The arguments `args` are passed to the * constructor of `T`. The function does not participate in the overload * resolution if `T` is an array type. */ template <class T, class... Args> typename helper::UniqueIf<T>::SingleObject MakeUnique(Args&&... args) { return std::unique_ptr<T>(new T(std::forward<Args>(args)...)); } /*! * \brief Constructs an object of type `T` and wraps it in a * `std``::``unique_ptr`. * \param n The size of the array to construct. * \return `std``::``unique_ptr` of an instance of type `T`. * * Constructs an array of unknown bound `T`. The function does not participate * in the overload resolution unless `T` is an array of unknown bound. */ template <class T> typename helper::UniqueIf<T>::UnknownBound MakeUnique(size_t n) { using U = typename std::remove_extent<T>::type; return std::unique_ptr<T>(new U[n]{}); } /*! * \brief Constructs an object of type `T` and wraps it in a * `std``::``unique_ptr`. * \param args List of arguments with which an instance of `T` will be * constructed. * * Constructs an arrays of known bound is disallowed. */ template <class T, class... Args> typename helper::UniqueIf<T>::KnownBound MakeUnique(Args&&... 
args) = delete; template<typename FCompType> FCompType GetFCompute(const nnvm::Op* op, const std::string& name, const Context& ctx) { static auto& fcompute_cpu = nnvm::Op::GetAttr<FCompType>(name + "<cpu>"); static auto& fcompute_gpu = nnvm::Op::GetAttr<FCompType>(name + "<gpu>"); if (ctx.dev_mask() == cpu::kDevMask) { return fcompute_cpu.get(op, nullptr); } else if (ctx.dev_mask() == gpu::kDevMask) { return fcompute_gpu.get(op, nullptr); } else { LOG(FATAL) << "Unknown device mask"; return nullptr; } } /*! * \brief Return the max integer value representable in the type `T` without loss of precision. */ template <typename T> constexpr size_t MaxIntegerValue() { return std::is_integral<T>::value ? std::numeric_limits<T>::max(): size_t(2) << (std::numeric_limits<T>::digits - 1); } template <> constexpr size_t MaxIntegerValue<mshadow::half::half_t>() { return size_t(2) << 10; } } // namespace common } // namespace mxnet #endif // MXNET_COMMON_UTILS_H_
GB_unop__identity_uint64_fc32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com  See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// Every token below (macro names and bodies) is consumed by the shared
// template GB_unop_transpose.c, so the code must stay exactly as generated.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_uint64_fc32
// op(A') function:  GB_unop_tran__identity_uint64_fc32

// C type:   uint64_t
// A type:   GxB_FC32_t
// cast:     uint64_t cij = GB_cast_to_uint64_t ((double) crealf (aij))
// unaryop:  cij = aij

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: output equals the cast input)
#define GB_OP(z, x) \
    z = x ;

// casting: keep the real part of the complex entry, then cast to uint64_t
#define GB_CAST(z, aij) \
    uint64_t z = GB_cast_to_uint64_t ((double) crealf (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint64_t z = GB_cast_to_uint64_t ((double) crealf (aij)) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__identity_uint64_fc32
(
    uint64_t *Cx,               // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    int64_t anz,                // number of entries to process
    int nthreads                // OpenMP thread count for the parallel loop
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // Embarrassingly parallel: each entry is cast independently.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC32_t aij = Ax [p] ;
        uint64_t z = GB_cast_to_uint64_t ((double) crealf (aij)) ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__identity_uint64_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The transpose kernel body is generated by textual inclusion; the
    // macros above (GB_ATYPE, GB_CAST_OP, ...) parameterize it.
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
DRB039-truedepsingleelement-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
 * Data race pair: a[i]@62:5 vs. a[0]@62:15
 *
 * NOTE: this race is INTENTIONAL. This file is a DataRaceBench positive
 * ("-yes") test case: the iteration with i == 0 writes a[0] while every
 * iteration reads a[0], and schedule(dynamic) lets those iterations run
 * concurrently on different threads. Do not "fix" the race — detecting it
 * is the benchmark's purpose.
 */
#include <stdlib.h>
#include <stdio.h>

int main (int argc, char* argv[])
{
  int len=1000;
  int i;
  int a[1000];

  /* Seed element 0; all iterations below depend on it (true dependence
   * on a single array element). */
  a[0] = 2;
#pragma omp parallel for schedule(dynamic)
  for (i=0;i<len;i++)
    a[i]=a[i]+a[0];   /* write a[i] races with reads of a[0] when i == 0 */

  printf("a[500]=%d\n", a[500]);
  return 0;
}
residualbased_newton_raphson_mpc_contact_strategy.h
// KRATOS ______ __ __ _____ __ __ __ // / ____/___ ____ / /_____ ______/ /_/ ___// /________ _______/ /___ ___________ _/ / // / / / __ \/ __ \/ __/ __ `/ ___/ __/\__ \/ __/ ___/ / / / ___/ __/ / / / ___/ __ `/ / // / /___/ /_/ / / / / /_/ /_/ / /__/ /_ ___/ / /_/ / / /_/ / /__/ /_/ /_/ / / / /_/ / / // \____/\____/_/ /_/\__/\__,_/\___/\__//____/\__/_/ \__,_/\___/\__/\__,_/_/ \__,_/_/ MECHANICS // // License: BSD License // license: ContactStructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_RESIDUALBASED_NEWTON_RAPHSON_MPC_CONTACT_STRATEGY) #define KRATOS_RESIDUALBASED_NEWTON_RAPHSON_MPC_CONTACT_STRATEGY /* System Includes */ /* External Includes */ /* Project includes */ #include "contact_structural_mechanics_application_variables.h" #include "includes/kratos_parameters.h" #include "includes/define.h" #include "includes/model_part.h" #include "includes/variables.h" // Strategies #include "solving_strategies/strategies/residualbased_newton_raphson_strategy.h" // Contact criteria #include "custom_strategies/custom_convergencecriterias/mpc_contact_criteria.h" // Utilities #include "utilities/variable_utils.h" #include "utilities/color_utilities.h" #include "utilities/math_utils.h" #include "utilities/atomic_utilities.h" // // Processes // #include "processes/fast_transfer_between_model_parts_process.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class ResidualBasedNewtonRaphsonMPCContactStrategy * @ingroup ContactStructuralMechanicsApplication * @brief Contact Newton Raphson class * @details This class is a specialization of the Newton Raphson strategy with some custom modifications for contact problems * @author Vicente Mataix Ferrandiz */ template<class TSparseSpace, class TDenseSpace, // = DenseSpace<double>, class TLinearSolver //= 
LinearSolver<TSparseSpace,TDenseSpace> > class ResidualBasedNewtonRaphsonMPCContactStrategy : public ResidualBasedNewtonRaphsonStrategy< TSparseSpace, TDenseSpace, TLinearSolver > { public: ///@name Type Definitions ///@{ /** Counted pointer of ClassName */ KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedNewtonRaphsonMPCContactStrategy ); typedef SolvingStrategy<TSparseSpace, TDenseSpace> SolvingStrategyType; typedef ImplicitSolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> StrategyBaseType; typedef ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef ResidualBasedNewtonRaphsonMPCContactStrategy<TSparseSpace, TDenseSpace, TLinearSolver> ClassType; typedef ConvergenceCriteria<TSparseSpace, TDenseSpace> TConvergenceCriteriaType; typedef MPCContactCriteria<TSparseSpace, TDenseSpace> TMPCContactCriteriaType; typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType; typedef typename BaseType::TDataType TDataType; typedef TSparseSpace SparseSpaceType; typedef typename BaseType::TSchemeType TSchemeType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; typedef ModelPart::NodesContainerType NodesArrayType; typedef ModelPart::ElementsContainerType ElementsArrayType; typedef ModelPart::ConditionsContainerType ConditionsArrayType; typedef ModelPart::MasterSlaveConstraintContainerType ConstraintArrayType; typedef std::size_t IndexType; typedef std::size_t SizeType; /** * @brief Default constructor */ explicit ResidualBasedNewtonRaphsonMPCContactStrategy() { } /** * @brief Default 
constructor. (with parameters) * @param rModelPart The model part of the problem * @param ThisParameters The configuration parameters */ explicit ResidualBasedNewtonRaphsonMPCContactStrategy(ModelPart& rModelPart, Parameters ThisParameters) : BaseType(rModelPart) { // Validate and assign defaults ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters()); this->AssignSettings(ThisParameters); } /** * @brief Default constructor * @param rModelPart The model part of the problem * @param pScheme The integration scheme * @param pNewConvergenceCriteria The convergence criteria employed * @param MaxIterations The maximum number of iterations * @param CalculateReactions The flag for the reaction calculation * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF * @param MoveMeshFlag The flag that allows to move the mesh */ explicit ResidualBasedNewtonRaphsonMPCContactStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver, IndexType MaxIterations = 30, bool CalculateReactions = false, bool ReformDofSetAtEachStep = false, bool MoveMeshFlag = false, Parameters ThisParameters = Parameters(R"({})") ) : BaseType(rModelPart, pScheme, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag ), mThisParameters(ThisParameters) { KRATOS_TRY; // We create the contact criteria mpMPCContactCriteria = Kratos::make_shared<TMPCContactCriteriaType>(); Parameters default_parameters = GetDefaultParameters(); mThisParameters.ValidateAndAssignDefaults(default_parameters); KRATOS_CATCH(""); } /** * @brief Default constructor * @param rModelPart The model part of the problem * @param pScheme The integration scheme * @param pNewLinearSolver The linear solver employed * @param pNewConvergenceCriteria The convergence 
criteria employed * @param MaxIterations The maximum number of iterations * @param CalculateReactions The flag for the reaction calculation * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF * @param MoveMeshFlag The flag that allows to move the mesh */ explicit ResidualBasedNewtonRaphsonMPCContactStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, IndexType MaxIterations = 30, bool CalculateReactions = false, bool ReformDofSetAtEachStep = false, bool MoveMeshFlag = false, Parameters ThisParameters = Parameters(R"({})") ) : BaseType(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag), mThisParameters(ThisParameters) { KRATOS_TRY; // We create the contact criteria mpMPCContactCriteria = Kratos::make_shared<TMPCContactCriteriaType>(); Parameters default_parameters = GetDefaultParameters(); mThisParameters.ValidateAndAssignDefaults(default_parameters); KRATOS_CATCH(""); } /** * @brief Default constructor * @param rModelPart The model part of the problem * @param pScheme The integration scheme * @param pNewLinearSolver The linear solver employed * @param pNewConvergenceCriteria The convergence criteria employed * @param MaxIterations The maximum number of iterations * @param CalculateReactions The flag for the reaction calculation * @param ReformDofSetAtEachStep The flag that allows to compute the modification of the DOF * @param MoveMeshFlag The flag that allows to move the mesh */ explicit ResidualBasedNewtonRaphsonMPCContactStrategy( ModelPart& rModelPart, typename TSchemeType::Pointer pScheme, typename TLinearSolver::Pointer pNewLinearSolver, typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria, typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver, IndexType MaxIterations = 30, bool 
CalculateReactions = false, bool ReformDofSetAtEachStep = false, bool MoveMeshFlag = false, Parameters ThisParameters = Parameters(R"({})") ) : BaseType(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag ), mThisParameters(ThisParameters) { KRATOS_TRY; // We create the contact criteria mpMPCContactCriteria = Kratos::make_shared<TMPCContactCriteriaType>(); Parameters default_parameters = GetDefaultParameters(); mThisParameters.ValidateAndAssignDefaults(default_parameters); KRATOS_CATCH(""); } /** * Destructor. */ ~ResidualBasedNewtonRaphsonMPCContactStrategy() override = default; ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /** * @brief Create method * @param rModelPart The model part of the problem * @param ThisParameters The configuration parameters */ typename SolvingStrategyType::Pointer Create( ModelPart& rModelPart, Parameters ThisParameters ) const override { return Kratos::make_shared<ClassType>(rModelPart, ThisParameters); } /** * @brief Operation to predict the solution ... 
if it is not called a trivial predictor is used in which the
     * values of the solution step of interest are assumed equal to the old values
     */
    void Predict() override
    {
        KRATOS_TRY

        BaseType::Predict();

        // Getting model part
        ModelPart& r_model_part = StrategyBaseType::GetModelPart();

        // We get the system
        TSystemMatrixType& rA = *BaseType::mpA;
        TSystemVectorType& rDx = *BaseType::mpDx;
        TSystemVectorType& rb = *BaseType::mpb;

        // We solve the system in order to check the active set once
        TSparseSpace::SetToZero(rA);
        TSparseSpace::SetToZero(rDx);
        TSparseSpace::SetToZero(rb);
        typename TSchemeType::Pointer p_scheme = BaseType::GetScheme();
        typename TBuilderAndSolverType::Pointer p_builder_and_solver = BaseType::GetBuilderAndSolver();
        p_builder_and_solver->BuildAndSolve(p_scheme, BaseType::GetModelPart(), rA, rDx, rb);

        // Check active set. The convergence-criteria echo level is silenced
        // (set to 0) around the contact PostCriteria call and restored after,
        // so this preliminary active-set check does not print.
        const SizeType echo_level_convergence_criteria = BaseType::mpConvergenceCriteria->GetEchoLevel();
        BaseType::mpConvergenceCriteria->SetEchoLevel(0);
        mpMPCContactCriteria->PostCriteria(r_model_part, BaseType::GetBuilderAndSolver()->GetDofSet(), rA, rDx, rb);
        BaseType::mpConvergenceCriteria->SetEchoLevel(echo_level_convergence_criteria);

        KRATOS_CATCH("")
    }

    /**
     * @brief Initialization of member variables and prior operations
     * @details Computes the contact nodal weights before delegating to the
     * base Newton-Raphson initialization.
     */
    void Initialize() override
    {
        KRATOS_TRY;

        // Computing nodal weights
        ComputeNodalWeights();

        BaseType::Initialize();

        KRATOS_CATCH("");
    }

    /**
     * @brief The problem of interest is solved.
     * @details This function calls sequentially: Initialize(), InitializeSolutionStep(), Predict(),
     * SolveSolutionStep() and FinalizeSolutionStep().
     * All those functions can otherwise be called separately.
     * @return Always 0.0 (the residual norm is not propagated here).
     */
    double Solve() override
    {
        this->Initialize();
        this->InitializeSolutionStep();
        this->Predict();
        this->SolveSolutionStep();
        this->FinalizeSolutionStep();

        // TODO: Comment for proper work of interaction

        return 0.0;
    }

    /**
     * @brief Performs all the required operations that should be done (for each step)
     * before solving the solution step.
* @details A member variable should be used as a flag to make sure this function is called only once per step. */ void InitializeSolutionStep() override { // Computing nodal weights ComputeNodalWeights(); BaseType::InitializeSolutionStep(); // // If enforcing NTN // const bool enforce_ntn = mThisParameters["enforce_ntn"].GetBool(); // if (enforce_ntn) { // EnforcingNTN(); // } } /** * @brief Performs all the required operations that should be done (for each step) * after solving the solution step. */ void FinalizeSolutionStep() override { KRATOS_TRY; BaseType::FinalizeSolutionStep(); KRATOS_CATCH(""); } /** * @brief Solves the current step. * @details This function returns true if a solution has been found, false otherwise. */ bool SolveSolutionStep() override { KRATOS_TRY; bool is_converged = false; // Getting model part ModelPart& r_model_part = StrategyBaseType::GetModelPart(); // We get the process info ProcessInfo& r_process_info = r_model_part.GetProcessInfo(); if (r_process_info.Is(INTERACTION)) { // We get the system TSystemMatrixType& rA = *BaseType::mpA; TSystemVectorType& rDx = *BaseType::mpDx; TSystemVectorType& rb = *BaseType::mpb; int inner_iteration = 0; const SizeType echo_level_convergence_criteria = BaseType::mpConvergenceCriteria->GetEchoLevel(); while (!is_converged && inner_iteration < mThisParameters["inner_loop_iterations"].GetInt()) { ++inner_iteration; if (echo_level_convergence_criteria > 0 && r_model_part.GetCommunicator().MyPID() == 0 ) { KRATOS_INFO("Simplified semi-smooth strategy") << BOLDFONT("INNER ITERATION: ") << inner_iteration << std::endl; } // We solve one loop r_process_info[NL_ITERATION_NUMBER] = 1; is_converged = AuxiliarSolveSolutionStep(); // We check the convergence if (r_process_info[NL_ITERATION_NUMBER] == 1) r_process_info[NL_ITERATION_NUMBER] = 2; // Trigger check is_converged = mpMPCContactCriteria->PostCriteria(r_model_part, BaseType::GetBuilderAndSolver()->GetDofSet(), rA, rDx, rb); if 
(echo_level_convergence_criteria > 0 && r_model_part.GetCommunicator().MyPID() == 0 ) { if (is_converged) KRATOS_INFO("Simplified semi-smooth strategy") << BOLDFONT("Simplified semi-smooth strategy. INNER ITERATION: ") << BOLDFONT(FGRN("CONVERGED")) << std::endl; else KRATOS_INFO("Simplified semi-smooth strategy") << BOLDFONT("INNER ITERATION: ") << BOLDFONT(FRED("NOT CONVERGED")) << std::endl; } } } else { is_converged = AuxiliarSolveSolutionStep(); } return is_converged; KRATOS_CATCH(""); } /** * @brief Solves the current step. This function returns true if a solution has been found, false otherwise. (auxiliar method) */ bool AuxiliarSolveSolutionStep() { // Getting flag INTERACTION ModelPart& r_model_part = StrategyBaseType::GetModelPart(); const bool update_each_nl_iteration = mThisParameters["update_each_nl_iteration"].GetBool(); VariableUtils().SetFlag(INTERACTION, update_each_nl_iteration, r_model_part.GetSubModelPart("ComputingContact").Conditions()); // Pointers needed in the solution typename TSchemeType::Pointer p_scheme = this->GetScheme(); typename TBuilderAndSolverType::Pointer p_builder_and_solver = this->GetBuilderAndSolver(); auto& r_dof_set = p_builder_and_solver->GetDofSet(); TSystemMatrixType& rA = *BaseType::mpA; TSystemVectorType& rDx = *BaseType::mpDx; TSystemVectorType& rb = *BaseType::mpb; // Initializing the parameters of the Newton-Raphson cycle unsigned int iteration_number = 1; r_model_part.GetProcessInfo()[NL_ITERATION_NUMBER] = iteration_number; bool is_converged = false; bool residual_is_updated = false; // Computing nodal weights ComputeNodalWeights(); p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb); BaseType::mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb); // // If enforcing NTN // const bool enforce_ntn = mThisParameters["enforce_ntn"].GetBool(); // if 
(enforce_ntn) { // EnforcingNTN(); // } // Function to perform the building and the solving phase. if (StrategyBaseType::mRebuildLevel > 0 || StrategyBaseType::mStiffnessMatrixIsBuilt == false) { TSparseSpace::SetToZero(rA); TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb); } else { TSparseSpace::SetToZero(rDx); //Dx=0.00; TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb); } // Debugging info BaseType::EchoInfo(iteration_number); // Updating the results stored in the database BaseType::UpdateDatabase(rA, rDx, rb, StrategyBaseType::MoveMeshFlag()); p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb); BaseType::mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); // Calculate reactions if required if (BaseType::mCalculateReactionsFlag) p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb); if (is_converged) { if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) { TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb); } is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb); } // Iteration Cycle... 
performed only for NonLinearProblems while (!is_converged && iteration_number++ < BaseType::mMaxIterationNumber) { // Setting the number of iteration r_model_part.GetProcessInfo()[NL_ITERATION_NUMBER] = iteration_number; // Computing nodal weights ComputeNodalWeights(); // Calling InitializeNonLinIteration p_scheme->InitializeNonLinIteration(r_model_part, rA, rDx, rb); BaseType::mpConvergenceCriteria->InitializeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); // Shaping correctly the system if (update_each_nl_iteration) { p_builder_and_solver->SetUpDofSet(p_scheme, r_model_part); p_builder_and_solver->SetUpSystem(r_model_part); p_builder_and_solver->ResizeAndInitializeVectors(p_scheme, BaseType::mpA, BaseType::mpDx, BaseType::mpb, r_model_part); } is_converged = BaseType::mpConvergenceCriteria->PreCriteria(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb); // Call the linear system solver to find the correction mDx for the it is not called if there is no system to solve if (SparseSpaceType::Size(rDx) != 0) { if (StrategyBaseType::mRebuildLevel > 1 || !StrategyBaseType::mStiffnessMatrixIsBuilt) { if (!BaseType::GetKeepSystemConstantDuringIterations()) { //A = 0.00; TSparseSpace::SetToZero(rA); TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildAndSolve(p_scheme, r_model_part, rA, rDx, rb); } else { TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb); } } else { TSparseSpace::SetToZero(rDx); TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHSAndSolve(p_scheme, r_model_part, rA, rDx, rb); } } else { KRATOS_WARNING("NO DOFS") << "ATTENTION: no free DOFs!! 
" << std::endl; } // Debugging info BaseType::EchoInfo(iteration_number); // Updating the results stored in the database BaseType::UpdateDatabase(rA, rDx, rb, StrategyBaseType::MoveMeshFlag()); p_scheme->FinalizeNonLinIteration(r_model_part, rA, rDx, rb); BaseType::mpConvergenceCriteria->FinalizeNonLinearIteration(r_model_part, r_dof_set, rA, rDx, rb); residual_is_updated = false; // Calculate reactions if required if (BaseType::mCalculateReactionsFlag) p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb); if (is_converged) { if (BaseType::mpConvergenceCriteria->GetActualizeRHSflag()) { TSparseSpace::SetToZero(rb); p_builder_and_solver->BuildRHS(p_scheme, r_model_part, rb); residual_is_updated = true; } is_converged = BaseType::mpConvergenceCriteria->PostCriteria(r_model_part, p_builder_and_solver->GetDofSet(), rA, rDx, rb); } } // Plots a warning if the maximum number of iterations is exceeded if (iteration_number >= BaseType::mMaxIterationNumber) { BaseType::MaxIterationsExceeded(); } else { KRATOS_INFO_IF("NR-Strategy", this->GetEchoLevel() > 0) << "Convergence achieved after " << iteration_number << " / " << BaseType::mMaxIterationNumber << " iterations" << std::endl; } // Recalculate residual if needed (note that some convergence criteria need it to be recalculated) if (!residual_is_updated) { // NOTE: // The following part will be commented because it is time consuming // and there is no obvious reason to be here. If someone need this // part please notify the community via mailing list before uncommenting it. // Pooyan. 
// TSparseSpace::SetToZero(mb); // p_builder_and_solver->BuildRHS(p_scheme, r_model_part, mb); } // Calculate reactions if required if (BaseType::mCalculateReactionsFlag) p_builder_and_solver->CalculateReactions(p_scheme, r_model_part, rA, rDx, rb); return is_converged; } /** * @brief This method returns the defaulr parameters in order to avoid code duplication * @return Returns the default parameters */ Parameters GetDefaultParameters() const override { Parameters default_parameters = Parameters(R"( { "name" : "newton_raphson_mpc_contact_strategy", "inner_loop_iterations" : 5, "update_each_nl_iteration" : false, "enforce_ntn" : false })" ); // Getting base class default parameters const Parameters base_default_parameters = BaseType::GetDefaultParameters(); default_parameters.RecursivelyAddMissingParameters(base_default_parameters); return default_parameters; } /** * @brief Returns the name of the class as used in the settings (snake_case format) * @return The name of the class */ static std::string Name() { return "newton_raphson_mpc_contact_strategy"; } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ ///@} ///@name Friends ///@{ protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ Parameters mThisParameters; /// The configuration parameters typename TConvergenceCriteriaType::Pointer mpMPCContactCriteria; /// The contact criteria ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /** * @brief This method assigns settings to member variables * @param ThisParameters Parameters that are assigned to the member variables */ void AssignSettings(const Parameters ThisParameters) override { BaseType::AssignSettings(ThisParameters); // We create the contact criteria mpMPCContactCriteria = Kratos::make_shared<TMPCContactCriteriaType>(); // Copy the parameters mThisParameters = ThisParameters; } ///@} ///@name Protected Access ///@{ ///@} 
///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@{ /** * Copy constructor. */ ResidualBasedNewtonRaphsonMPCContactStrategy(const ResidualBasedNewtonRaphsonMPCContactStrategy& Other) { }; private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ // /** // * @brief This inforces NTN formulation // */ // void EnforcingNTN() // { // // List of enforced nodes to not repeat // std::unordered_set<IndexType> enforced_nodes; // // // Getting contact model part // ModelPart& r_root_model_part = StrategyBaseType::GetModelPart().GetRootModelPart(); // ModelPart& r_computing_contact_model_part = StrategyBaseType::GetModelPart().GetSubModelPart("ComputingContact"); // // // The process info // const auto& r_process_info = r_root_model_part.GetProcessInfo(); // // // Reset the pointers of the conditions // for (auto& r_cond : r_computing_contact_model_part.Conditions()) { // if (r_cond.Has(CONSTRAINT_POINTER)) { // r_cond.SetValue(CONSTRAINT_POINTER, nullptr); // } // } // // // Iterate over the constraints // IndexType counter = 1; // for (auto& r_const : r_root_model_part.MasterSlaveConstraints()) { // r_const.SetId(counter); // ++counter; // } // // // Auxiliar classes // Matrix original_relation_matrix, relation_matrix; // Vector original_constant_vector, constant_vector; // ModelPart::DofsVectorType original_master_dofs, master_dofs, original_slave_dofs, slave_dofs; // // // Iterate over the constraints // for (auto& r_const : r_computing_contact_model_part.MasterSlaveConstraints()) { // // Getting original system // r_const.GetLocalSystem(original_relation_matrix, original_constant_vector, r_process_info); // r_const.GetDofList(original_slave_dofs, original_master_dofs, r_process_info); // // // TODO: Finish rebuild // // // Creating new constraint // r_root_model_part.CreateNewMasterSlaveConstraint("LinearMasterSlaveConstraint", counter, 
master_dofs, slave_dofs, relation_matrix, constant_vector); // // // Setting to remove the old constraints // r_const.Set(TO_ERASE, true); // // ++counter; // } // // // Remove old constraints // r_root_model_part.RemoveMasterSlaveConstraintsFromAllLevels(TO_ERASE); // // // Transfer constraints from the root to the computing model part // FastTransferBetweenModelPartsProcess(r_computing_contact_model_part, r_root_model_part, FastTransferBetweenModelPartsProcess::EntityTransfered::CONSTRAINTS).Execute(); // // // Reorder ids // counter = 1; // for (auto& r_const : r_root_model_part.MasterSlaveConstraints()) { // r_const.SetId(counter); // ++counter; // } // } /** * @brief This computes the nodal weights */ void ComputeNodalWeights() { // Getting contact model part ModelPart& r_contact_model_part = StrategyBaseType::GetModelPart().GetSubModelPart("Contact"); // Reset the NODAL_PAUX and NODAL_MAUX auto& r_nodes_array = r_contact_model_part.Nodes(); VariableUtils().SetNonHistoricalVariableToZero(NODAL_PAUX, r_nodes_array); VariableUtils().SetNonHistoricalVariableToZero(NODAL_MAUX, r_nodes_array); // We set the constraints active and inactive in function of the active set auto& r_conditions_array = r_contact_model_part.Conditions(); auto it_cond_begin = r_conditions_array.begin(); // If enforcing NTN const bool enforce_ntn = false; // const bool enforce_ntn = mThisParameters["enforce_ntn"].GetBool(); // if (enforce_ntn) { // VariableUtils().SetNonHistoricalVariable(NODAL_PAUX, 1.0, r_nodes_array); // } #pragma omp parallel for for(int i = 0; i < static_cast<int>(r_conditions_array.size()); ++i) { auto it_cond = it_cond_begin + i; // Only slave conditions if (it_cond->Is(SLAVE)) { auto& r_geometry = it_cond->GetGeometry(); Vector lumping_factor; lumping_factor = r_geometry.LumpingFactors(lumping_factor); const double domain_size = r_geometry.DomainSize(); for (IndexType i_node = 0; i_node < r_geometry.size(); ++i_node) { auto& r_node = r_geometry[i_node]; if 
(!enforce_ntn) { AtomicAdd(r_node.GetValue(NODAL_PAUX), 1.0); } AtomicAdd(r_node.GetValue(NODAL_MAUX), lumping_factor[i_node] * domain_size); } } } } ///@} ///@name Private Access ///@{ ///@} ///@} ///@name Serialization ///@{ ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /* Class ResidualBasedNewtonRaphsonMPCContactStrategy */ ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ ///@} } // namespace Kratos #endif /* KRATOS_RESIDUALBASED_NEWTON_RAPHSON_MPC_CONTACT_STRATEGY */
readbw.c
/******************************************************************************
** Copyright (c) 2013-2018, Alexander Heinecke                               **
** All rights reserved.                                                      **
**                                                                           **
** Redistribution and use in source and binary forms, with or without       **
** modification, are permitted provided that the following conditions       **
** are met:                                                                  **
** 1. Redistributions of source code must retain the above copyright        **
**    notice, this list of conditions and the following disclaimer.         **
** 2. Redistributions in binary form must reproduce the above copyright     **
**    notice, this list of conditions and the following disclaimer in the   **
**    documentation and/or other materials provided with the distribution.  **
** 3. Neither the name of the copyright holder nor the names of its         **
**    contributors may be used to endorse or promote products derived       **
**    from this software without specific prior written permission.         **
**                                                                           **
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS      **
** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT        **
** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR    **
** A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT    **
** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,   **
** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR   **
** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF   **
** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING     **
** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS       **
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.             **
******************************************************************************/

/* optional hardware performance-counter instrumentation; off by default */
#if 0
#define USE_UNCORE_PERF_COUNTERS
#if 0
#define USE_DRAM_COUNTERS
#endif
#endif
#if 0
#define USE_CORE_PERF_COUNTERS
#endif

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#if defined(USE_UNCORE_PERF_COUNTERS) || defined(USE_CORE_PERF_COUNTERS)
#include "./../common/perf_counter_markers.h"
#endif

/* number of doubles in the read array (overridable at compile time) */
#ifndef STREAM_ARRAY_SIZE
# define STREAM_ARRAY_SIZE 100000
#endif

/* number of benchmark repetitions (overridable at compile time) */
#ifdef NTIMES
#if NTIMES<=1
# define NTIMES 1000
#endif
#endif
#ifndef NTIMES
# define NTIMES 1000
#endif

# ifndef MIN
# define MIN(x,y) ((x)<(y)?(x):(y))
# endif
# ifndef MAX
# define MAX(x,y) ((x)>(y)?(x):(y))
# endif

/*
 * Elapsed wall-clock time in seconds between two gettimeofday() samples.
 * 'static' added: a plain C99 'inline' without an external definition can
 * fail to link when the compiler decides not to inline the call.
 */
static inline double sec(struct timeval start, struct timeval end) {
  return ((double)(((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)))) / 1.0e6;
}

/*
 * Read-bandwidth benchmark: sums a large double array NTIMES and reports
 * min/avg/max GiB/s; optionally cross-checks against hardware counters.
 * Returns 0 when the reduction matches the analytic expectation, 1 otherwise.
 */
int main(int argc, char* argv[]) {
  double* l_data;
  size_t l_n = 0;
  size_t l_i = 0;
  double* l_times;
  double l_result;
  double l_avgTime, l_minTime, l_maxTime;
  double l_size = (double)((size_t)STREAM_ARRAY_SIZE)*sizeof(double);
  double l_sum = (double)((size_t)STREAM_ARRAY_SIZE);
  struct timeval l_startTime, l_endTime;
  int l_ret = 0;
#ifdef USE_UNCORE_PERF_COUNTERS
  ctrs_uncore a, b, s;
  bw_gibs bw_min, bw_max, bw_avg;
  llc_victims llc_vic_min, llc_vic_max, llc_vic_avg;
#ifdef USE_DRAM_COUNTERS
  setup_uncore_ctrs( CTRS_EXP_DRAM_CAS );
#else
  setup_uncore_ctrs( CTRS_EXP_CHA_LLC_LOOKUP_VICTIMS );
#endif
  zero_uncore_ctrs( &a );
  zero_uncore_ctrs( &b );
  zero_uncore_ctrs( &s );
#endif
#ifdef USE_CORE_PERF_COUNTERS
  ctrs_core a, b, s;
  bw_gibs bw_min, bw_max, bw_avg;
  setup_core_ctrs( CTRS_EXP_L2_BW );
  zero_core_ctrs( &a );
  zero_core_ctrs( &b );
  zero_core_ctrs( &s );
#endif

  (void)argc;
  (void)argv;

  /* the array is initialized with 0..N-1, so the expected reduction is
   * sum_{i=0}^{N-1} i = (N^2 - N)/2.
   * BUGFIX: this was ((N^2)+N)/2, i.e. N too large; the error only went
   * unnoticed because the final check also lacked fabs(). */
  l_sum = ((l_sum*l_sum) - l_sum)/2;

  /* BUGFIX: allocation results were previously unchecked */
  if (0 != posix_memalign((void**)&l_data, 4096, ((size_t)STREAM_ARRAY_SIZE)*sizeof(double))) {
    fprintf(stderr, "posix_memalign of the data array failed\n");
    return EXIT_FAILURE;
  }
  l_times = (double*)malloc(sizeof(double)*NTIMES);
  if (NULL == l_times) {
    fprintf(stderr, "allocation of the timing array failed\n");
    free(l_data);
    return EXIT_FAILURE;
  }

  printf("READ BW Test Size MiB: %f\n", (l_size/(1024.0*1024.0)));

  // init data
#pragma omp parallel for
  for ( l_n = 0; l_n < STREAM_ARRAY_SIZE; l_n++ ) {
    l_data[l_n] = (double)l_n;
  }

#ifdef USE_UNCORE_PERF_COUNTERS
  read_uncore_ctrs( &a );
#endif
#ifdef USE_CORE_PERF_COUNTERS
  read_core_ctrs( &a );
#endif

  // run benchmark
  for( l_i = 0; l_i < NTIMES; l_i++ ) {
    l_result = 0.0;
    gettimeofday(&l_startTime, NULL);
    // we do manual reduction here as we don't rely on a smart OpenMP implementation
#pragma omp parallel
    {
      double l_res = 0.0;
#pragma omp for
      for ( l_n = 0; l_n < STREAM_ARRAY_SIZE; l_n++ ) {
        l_res += l_data[l_n];
      }
#pragma omp atomic
      l_result += l_res;
    }
    gettimeofday(&l_endTime, NULL);
    l_times[l_i] = sec(l_startTime, l_endTime);
  }

#ifdef USE_UNCORE_PERF_COUNTERS
  read_uncore_ctrs( &b );
  difa_uncore_ctrs( &a, &b, &s );
  divi_uncore_ctrs( &s, NTIMES );
#endif
#ifdef USE_CORE_PERF_COUNTERS
  read_core_ctrs( &b );
  difa_core_ctrs( &a, &b, &s );
  divi_core_ctrs( &s, NTIMES );
#endif

  // postprocess timing
  l_avgTime = 0.0;
  l_minTime = 100000.0;
  l_maxTime = 0.0;
  for( l_i = 0; l_i < NTIMES; l_i++ ) {
    l_avgTime += l_times[l_i];
    l_minTime = MIN(l_minTime, l_times[l_i]);
    l_maxTime = MAX(l_maxTime, l_times[l_i]);
  }
  l_avgTime /= (double)NTIMES;

  // output; bandwidth derives from the fastest run (minTime) for MAX, etc.
  printf("AVG GiB/s (calculated): %f\n", (l_size/(1024.0*1024.0*1024.0))/l_avgTime);
  printf("MAX GiB/s (calculated): %f\n", (l_size/(1024.0*1024.0*1024.0))/l_minTime);
  printf("MIN GiB/s (calculated): %f\n", (l_size/(1024.0*1024.0*1024.0))/l_maxTime);
#ifdef USE_UNCORE_PERF_COUNTERS
#ifdef USE_DRAM_COUNTERS
  get_cas_ddr_bw_uncore_ctrs( &s, l_maxTime, &bw_min );
  get_cas_ddr_bw_uncore_ctrs( &s, l_minTime, &bw_max );
  get_cas_ddr_bw_uncore_ctrs( &s, l_avgTime, &bw_avg );
  printf("AVG GiB/s (uncore ctrs): %f\n", bw_avg.rd);
  printf("MAX GiB/s (uncore ctrs): %f\n", bw_max.rd);
  printf("MIN GiB/s (uncore ctrs): %f\n", bw_min.rd);
#else
  get_llc_victim_bw_uncore_ctrs( &s, l_maxTime, &llc_vic_max );
  get_llc_victim_bw_uncore_ctrs( &s, l_minTime, &llc_vic_min );
  get_llc_victim_bw_uncore_ctrs( &s, l_avgTime, &llc_vic_avg );
  printf("AVG GiB/s (uncore ctrs): %f\n", llc_vic_avg.rd_bw);
  printf("MAX GiB/s (uncore ctrs): %f\n", llc_vic_max.rd_bw);
  printf("MIN GiB/s (uncore ctrs): %f\n", llc_vic_min.rd_bw);
#endif
#endif
#ifdef USE_CORE_PERF_COUNTERS
  get_l2_bw_core_ctrs( &s, l_maxTime, &bw_min );
  get_l2_bw_core_ctrs( &s, l_minTime, &bw_max );
  get_l2_bw_core_ctrs( &s, l_avgTime, &bw_avg );
  printf("AVG GiB/s (IN L2): %f\n", bw_avg.rd);
  printf("MAX GiB/s (IN L2): %f\n", bw_max.rd);
  printf("MIN GiB/s (IN L2): %f\n", bw_min.rd);
  printf("AVG GiB/s (OUTS L2): %f\n", bw_avg.wr);
  printf("MAX GiB/s (OUTS L2): %f\n", bw_max.wr);
  printf("MIN GiB/s (OUTS L2): %f\n", bw_min.wr);
  printf("AVG GiB/s (OUTNS L2): %f\n", bw_avg.wr2);
  printf("MAX GiB/s (OUTNS L2): %f\n", bw_max.wr2);
  printf("MIN GiB/s (OUTNS L2): %f\n", bw_min.wr2);
  printf("AVG GiB/s (DEM L2): %f\n", bw_avg.wr3);
  printf("MAX GiB/s (DEM L2): %f\n", bw_max.wr3);
  printf("MIN GiB/s (DEM L2): %f\n", bw_min.wr3);
  printf("AVG GiB/s (DROP L2): %f\n", bw_avg.wr4);
  printf("MAX GiB/s (DROP L2): %f\n", bw_max.wr4);
  printf("MIN GiB/s (DROP L2): %f\n", bw_min.wr4);
#endif

  /* BUGFIX: the original compared the signed error '(l_result/l_sum)-1'
   * against 1e-10 without fabs(), so any result SMALLER than the expected
   * sum passed unconditionally. */
  if (fabs((l_result/l_sum) - 1.0) < 1e-10) {
    printf("PASSED, %f\n", (l_result/l_sum)-1);
  } else {
    printf("FAILED, %f, %f, %f\n", l_sum, l_result, (l_result/l_sum)-1);
    l_ret = 1;
  }

  free(l_times);
  free(l_data);

  return l_ret;
}
HOG2.h
#ifndef HOG2_H #define HOG2_H #include "../../Assertions.h" #include "../ImageChannel.h" #include "../ImagePoint.h" #include "../Derivative.h" #include "../../math/statistics/Histogram.h" #include "../Kernel.h" #include "../KernelFactory.h" #include "../../geo/Size2.h" #include "../../geo/Point2.h" namespace K { /** * calculate the histogram-of-gradients at a given * location using a provided region (size) * * // http://stackoverflow.com/questions/32417531/hog-what-is-done-in-the-contrast-normalization-step * // http://www.geocities.ws/talh_davidc/#cst_extract * * - no smoothing beforehand! * - [0:180] degree region! * - a 270 degree gradient is the same as a 90 degree gradient -> modulo * */ class HOG2 { private: struct HOGGradient { float magnitude; float direction; // in radians [0:2pi] 0 = left, pi/2 = up }; // get half the given value. rounded down! static inline int half(const int i) {return i/2;} public: enum Pattern { RECTANGULAR, CIRCULAR, }; struct CellPoint : public ImagePoint { float impact; // from gaussian, to downweight edge-pixels CellPoint(const int x, const int y, const float impact) : ImagePoint(x,y), impact(impact) {;} }; struct Contribution { int bin; float weight; Contribution() : bin(0), weight(0) {;} Contribution(int bin, float weight) : bin(bin), weight(weight) {;} }; struct Contributions { Contribution c1; Contribution c2; Contributions(const Contribution c1, const Contribution c2) : c1(c1), c2(c2) {;} }; /** helper class to describe the feature-area based on the HOG settings */ struct Area { // upper left coordinate for the area-of-interest int sx; int sy; // first [upper left] block's center int cx; int cy; // number of x and y blocks within the window int wx; int wy; const Size2i blockStride; Area(const Point2i pos, const Size2i blockSize, const Size2i winSize, const Size2i blockStride) : blockStride(blockStride) { // upper left coordinate for the area-of-interest sx = pos.x - half(winSize.w); sy = pos.y - half(winSize.h); // first 
[upper left] block's center cx = sx + half(blockSize.w); cy = sy + half(blockSize.h); // number of x and y blocks within the window wx = ((winSize.w - blockSize.w) / blockStride.w) + 1; wy = ((winSize.h - blockSize.h) / blockStride.h) + 1; } /** get the center for the nx-th/ny-th block */ Point2i getBlockCenter(const int nx, const int ny) const { return Point2i( cx + nx*blockStride.w, cy + ny*blockStride.h ); } }; struct Vector : public std::vector<float> { Vector() { ; } Vector(const size_t size) { this->reserve(size); } /** ensure the vector has a length of 1 */ void normalize() { float length = 0; for (float f : *this) {length += f*f;} length += 0.2f; // this constant serves two purposes: prevent length = 0, and prevent near-0 vectors from getting too long length = std::sqrt(length); for (float& f : *this) {f /= length;} } float length() const { float length = 0; for (float f : *this) {length += f*f;} return std::sqrt(length); } float distance(const Vector& o) const { float sum = 0; for (size_t i = 0; i < size(); ++i) { const float d = (*this)[i] - o[i]; sum += d*d; } return std::sqrt(sum); } }; private: /** the size for each cell [cell is the smallest element] */ const Size2i cellSize; /** the number of bins to use within each cell */ const int bins; /** number of degrees per bin */ const float degPerBin; /** currently we pre-calculate everything at pixel-level [highest accuracy] */ const int stride = 1; /** the size for each block [containing several cells]. 
must be a multiple of the cellSize */ const Size2i blockSize; /** number of float-values per cell */ const int valuesPerCell; /** number of float-values per block */ const int valuesPerBlock; /** sigma to (slightly) downweight edge pixels */ const float sigma = 5.0f; /** downweight each block's edge pixels [more importance to the center] */ K::Kernel gauss; /** histogram for each cell */ DataMatrix<Vector> cells; /** histogram for each block [multiple cells] */ DataMatrix<Vector> blocks; public: /** ctor */ HOG2(const ImageChannel& img, const Size2i cellSize = Size2i(8,8), const int bins = 9, const Size2i blockSize = Size2i(16,16)) : cellSize(cellSize), bins(bins), degPerBin(180.0f / (float)bins), blockSize(blockSize), valuesPerCell(bins), valuesPerBlock(valuesPerCell*(blockSize.w/cellSize.w)*(blockSize.h/cellSize.h)) { if (blockSize.w != blockSize.h) {throw Exception("currently, only square blocks are supported");} // perform some sanity checks if (blockSize.w % cellSize.w != 0) {throw Exception("blockSize must be a multiple of cellSize");} if (blockSize.h % cellSize.h != 0) {throw Exception("blockSize must be a multiple of cellSize");} // TODO gauss = K::KernelFactory::gauss2D(0.5, cellSize.w); // TODO: searching stride? 
(currently 1px, but requires many [unnecessary] calculations) precalc(img); } /** get the histogram for the cell around [=centered at] (x,y) */ const Vector& getCell(const int x, const int y) const { //if (x % stride != 0) {throw Exception("x-coordinate must be a multiple of the stride-size");} //if (y % stride != 0) {throw Exception("y-coordinate must be a multiple of the stride-size");} if ((x < cellSize.w / 2) || (y < cellSize.h / 2)) {throw Exception("block position out of bounds");} return cells.getConstRef(x/stride, y/stride); } /** get the historgram for the block around [=centered at] (x,y) */ const Vector& getBlock(const int x, const int y) const { //if (x % stride != 0) {throw Exception("x-coordinate must be a multiple of the stride-size");} //if (y % stride != 0) {throw Exception("y-coordinate must be a multiple of the stride-size");} if ((x < blockSize.w / 2) || (y < blockSize.h / 2)) {throw Exception("window position out of bounds");} return blocks.getConstRef(x/stride, y/stride); } /** get a feature-vector for the given location (x,y) = center and size(w,h) */ Vector getFeature(const Point2i pos, const Size2i winSize, const Size2i blockStride = Size2i(8,8)) const { //const int x = pos.x; //const int y = pos.y; const int w = winSize.w; const int h = winSize.h; // sanity checks //if (x % stride != 0) {throw Exception("x-coordinate must be a multiple of the stride-size");} //if (y % stride != 0) {throw Exception("y-coordinate must be a multiple of the stride-size");} if (w % cellSize.w != 0) {throw Exception("window-width must be a multiple of the cell-width");} if (h % cellSize.h != 0) {throw Exception("window-height must be a multiple of the cell-height");} if ((winSize.w - blockSize.w) % blockStride.w != 0) {throw Exception("err");} if ((winSize.h - blockSize.h) % blockStride.h != 0) {throw Exception("err");} //if (windowSize != 2*blockSize) {throw Exception("not yet supported!");} const Area a = Area(pos, blockSize, winSize, blockStride); const 
size_t reserve = a.wx*a.wy*valuesPerBlock; //Vector feature(reserve); Vector feature; feature.resize(reserve); float* data = feature.data(); for (int y = 0; y < a.wy; ++y) { for (int x = 0; x < a.wx; ++x) { const Point2i pt = a.getBlockCenter(x, y); const Vector& block = getBlock(pt.x, pt.y); _assertNot0(block.size(), "invalid number of values in window detected"); //feature.insert(feature.end(), block.begin(), block.end()); memcpy(data, block.data(), block.size()*sizeof(float)); data += block.size(); } } _assertEqual(reserve, feature.size(), "invalid feature size"); return feature; } public: // FOR TESTING /** convert from degress to bin number [float!] */ inline float degToBin(const float deg) const { // sanity check if (deg < 0) {throw Exception("degrees out of bounds");} if (deg > 360) {throw Exception("degrees out of bounds");} return deg / degPerBin; } /** convert orientation + magnitude to a bin-contribution */ Contributions getContribution(const float deg, const float mag) const { const float bin = degToBin(deg); Contribution c1, c2; c1.bin = (int) std::floor(bin); c2.bin = (int) std::ceil(bin); const float alpha = (bin - (float)c1.bin); c1.bin %= bins; c2.bin %= bins; c1.weight = mag * (1.0f - alpha); c2.weight = mag * (alpha); return Contributions(c1,c2); } private: inline float atan360(const float dy, const float dx) const { const float rad = std::atan2(dy, dx); return (rad >= 0.0) ? (rad) : (2.0f*(float)M_PI+rad); } /** perform one-time calculations for fast lookups */ void precalc(const ImageChannel& img) { buildCells(img); buildBlocks(img); } /** * step1) * calculate HOG cells [usually 8x8] around each "pixel" of the input image * TODO: do not calculate for each pixel [++i] but for [i+=stride] * that will be used during the matching phase. 
this is less accurate but faster */ void buildCells(const ImageChannel& img) { const int w = img.getWidth(); const int h = img.getHeight(); // number of blocks to calculate const int nx = img.getWidth() / stride; const int ny = img.getHeight() / stride; // get derivative images (x and y) const K::ImageChannel imgX = Derivative::getXcen(img); // [-1: 0: +1] const K::ImageChannel imgY = Derivative::getYcen(img); // [-1: 0: +1] // buffer containing HOG-Block-Histogram for every stride-th-pixel within the image cells = DataMatrix<Vector>(nx, ny); // list of all pixels that belong to a HOG-window (centered at 0,0) const std::vector<CellPoint> region = getCellPoints(Pattern::RECTANGULAR); // border to skip [half block size] const int w2 = half(cellSize.w); const int h2 = half(cellSize.h); // build HOG-Histogram for each block centered at (x,y) with stride-th increment for (int y = h2; y <= h-h2; y += stride) { #pragma omp parallel for for (int x = w2; x <= w-w2; x += stride) { cells.set(x/stride, y/stride, getHistogram(imgX, imgY, x,y, region)); } } } /** * step2) * calculate HOG blocks [=several cells] [usually 16x16 (or 2x2 cells)] around each "pixel" of the input image */ void buildBlocks(const ImageChannel& img) { //if (windowSize != 2*blockSize) {throw Exception("not yet supported!");} const int w = img.getWidth(); const int h = img.getHeight(); // number of windows to calculate const int nx = img.getWidth() / stride; const int ny = img.getHeight() / stride; // buffer containing HOG-Window-Vector for every stride-th-pixel within the image blocks = DataMatrix<Vector>(nx, ny); const int bsw2 = half(blockSize.w); const int bsh2 = half(blockSize.h); // build combined/normalized Histogram for each Window centered at (x,y) for (int y = bsh2; y <= h-bsh2; y += stride) { #pragma omp parallel for for (int x = bsw2; x <= w-bsw2; x += stride) { // upper left coordinate for the area-of-interest const int sx = x - half(blockSize.w); const int sy = y - half(blockSize.h); // first 
block's center const int cx = sx + half(cellSize.w); const int cy = sy + half(cellSize.h); // number of cells within each block const int cellsX = blockSize.w / cellSize.w; const int cellsY = blockSize.h / cellSize.h; // build the block const size_t reserve = cellsX*cellsY*bins; //Vector block(reserve); Vector block; block.resize(reserve); float* data = block.data(); // fetch each cell that belongs to the block for (int y1 = 0; y1 < cellsY; ++y1) { for (int x1 = 0; x1 < cellsX; ++x1) { const int xx = cx + x1*cellSize.w; const int yy = cy + y1*cellSize.h; const Vector& cell = getCell(xx, yy); //block.insert(block.end(), cell.begin(), cell.end()); memcpy(data, cell.data(), cell.size()*sizeof(float)); data += cell.size(); } } _assertEqual(reserve, block.size(), "invalid number of entries in block"); // normalize the window block.normalize(); // store blocks.set(x/stride, y/stride, block); } } } // /** convert gradients to histogram */ // Vector getHistogram(const std::vector<HOGGradient>& gradients) { // Vector res(bins); // res.resize(bins); // for (const HOGGradient& hg : gradients) { // const float deg = hg.direction * 180.0f / (float)M_PI; // const Contributions c = getContribution(deg, hg.magnitude); // if (1 == 1) { // res[c.c1.bin] += c.c1.weight; // split contribution // res[c.c2.bin] += c.c2.weight; // } else { // res[c.c1.bin] += c.c1.weight; // both to the same bin // res[c.c1.bin] += c.c2.weight; // both to the same bin // } // } // return res; // } // /** get all individual gradients at the given location */ // std::vector<HOGGradient> getGradients(const K::ImageChannel& imgX, const K::ImageChannel& imgY, const int x, const int y, const std::vector<ImagePoint>& region) const { // std::vector<HOGGradient> gradients(bins); // for (size_t i = 0; i < region.size(); ++i) { // const ImagePoint p = region[i]; // // point within the image // const int x1 = x+p.x; // const int y1 = y+p.y; // // clamp // if (x1 < 0 || x1 >= imgX.getWidth()) {continue;} // if (y1 < 
0 || y1 >= imgX.getHeight()) {continue;} // // calculate the centered derivatives // const auto dx = imgX.get(x1, y1); // gradient's magnitude in x direction // const auto dy = imgY.get(x1, y1); // gradient's magnitude in y direction // // calculate magnitude and direction of the gradient // HOGGradient grad; // grad.magnitude = std::sqrt( (dx*dx) + (dy*dy) ); // gradient's overall magnitude // grad.direction = atan360(dy, dx); // the gradient's direction // gradients.push_back(grad); // } // return gradients; // } /** get all individual gradients at the given location */ Vector getHistogram(const K::ImageChannel& imgX, const K::ImageChannel& imgY, const int x, const int y, const std::vector<CellPoint>& region) const { // output histogram Vector res; res.resize(bins); // process each pixel for (size_t i = 0; i < region.size(); ++i) { const CellPoint p = region[i]; // point within the image const int x1 = x+p.x; const int y1 = y+p.y; // clamp if (x1 < 0 || x1 >= imgX.getWidth()) {continue;} if (y1 < 0 || y1 >= imgX.getHeight()) {continue;} // calculate the centered derivatives const auto dx = imgX.get(x1, y1); // gradient's magnitude in x direction const auto dy = imgY.get(x1, y1); // gradient's magnitude in y direction // calculate magnitude and direction of the gradient const float mag = std::sqrt( (dx*dx) + (dy*dy) ); // gradient's overall magnitude const float dir = atan360(dy, dx); // the gradient's direction in radians const float deg = dir * 180.0f / (float)M_PI; // in degree // calculate bin-contribution [max 2 bins] // hereby add the impact factor based on the blur-window const Contributions c = getContribution(deg, mag * p.impact); if (1 == 1) { res[c.c1.bin] += c.c1.weight; // split contribution res[c.c2.bin] += c.c2.weight; } else { res[c.c1.bin] += c.c1.weight; // both to the same bin res[c.c1.bin] += c.c2.weight; // both to the same bin } } // done return res; } public: /** a list of all pixels within a cell. 
(0,0) = cell-center */ std::vector<CellPoint> getCellPoints(const Pattern p) const { std::vector<CellPoint> region; CellPoint dst(0,0,0); const ImagePoint center(0,0); if (p == CIRCULAR && cellSize.w != cellSize.h) { throw Exception("CIRCULAR pattern requires cellSize.w == cellSize.h"); } const int sw2 = half(cellSize.w); const int sh2 = half(cellSize.w); // if the cell-size is odd, the end is included -> +1 const int ew2 = sw2 + ( (cellSize.w % 2 == 1) ? 1 : 0); const int eh2 = sh2 + ( (cellSize.h % 2 == 1) ? 1 : 0); // process a square region... for (dst.y = -sh2; dst.y < +eh2; ++dst.y) { for (dst.x = -sw2; dst.x < +ew2; ++dst.x) { // impact factor based on sigma and distance from center // ensure the impact pattern is symmetric, also for even block sizes [no real center] const float dx = (float)dst.x + ( (cellSize.w % 2 == 0) ? 0.5f : 0.0f ); const float dy = (float)dst.y + ( (cellSize.h % 2 == 0) ? 0.5f : 0.0f ); dst.impact = std::exp( - ((dx*dx) + (dy*dy)) / (2.0f*sigma*sigma) ); // pattern? if (p == RECTANGULAR) { region.push_back(dst); } else if (p == CIRCULAR) { // ...but use only points within a radius around the center const float d = center.distance(dst); if (d <= cellSize.w) {region.push_back(dst);} } std::cout << dst.x << "," << dst.y << ":" << dst.impact << "\t"; } std::cout << std::endl; } return region; } }; } #endif // HOG2_H
openMP.c
#include <stdio.h> #include <stdbool.h> #include <stdlib.h> #include "omp.h" // Set the matrix sixe, number of threads and chunk size const int MATRIX_SIZE = 8192; const int NUMBER_OF_THREADS = 128; const int CHUNK_SIZE = 2; //iterate through the array and print the matrix in matrix readable fashion void printSquareMatrixInt(int n, int** matrix) { for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { printf("%d \t", matrix[i][j]); } printf("\n"); } } void naiveSolutionInt(int** matrix) { int temp; for (int i = 0; i < MATRIX_SIZE - 1; i++) { for (int j = i + 1; j < MATRIX_SIZE; j++) { temp = matrix[i][j]; matrix[i][j] = matrix[j][i]; matrix[j][i] = temp; } } } void openMPsolution(int** matrix) { int i, j, temp; #pragma omp parallel shared(matrix) private(temp, i, j) { #pragma omp for schedule(dynamic, CHUNK_SIZE) nowait for (i = 0; i < MATRIX_SIZE - 1; i++) { for (j = i + 1; j < MATRIX_SIZE; j++) { temp = matrix[i][j]; matrix[i][j] = matrix[j][i]; matrix[j][i] = temp; } } } } int main() { omp_set_num_threads(NUMBER_OF_THREADS); // Create dynamic array and populate it int ** matrix; matrix = malloc(MATRIX_SIZE * sizeof(int *)); for(int i = 0; i < MATRIX_SIZE; i++) matrix[i] = (int *)malloc(MATRIX_SIZE * sizeof(int)); int count = 0; for(int i = 0; i < MATRIX_SIZE; i++) { for(int j = 0; j < MATRIX_SIZE; j++) { matrix[i][j] = count; count++; } } printf("Size %c %c: %d\n", ' ',' ', MATRIX_SIZE); printf("Threads %c: %d\n", ' ', NUMBER_OF_THREADS ); double dtime; // printSquareMatrixInt(MATRIX_SIZE, matrix); dtime = omp_get_wtime(); naiveSolutionInt(matrix); dtime = omp_get_wtime() - dtime; printf("Naive %c %c: %f\n", ' ',' ', dtime); // printf(" \n --------------------------------------- \n"); // printSquareMatrixInt(MATRIX_SIZE, matrix); dtime = omp_get_wtime(); openMPsolution(matrix); dtime = omp_get_wtime() - dtime; printf("Parallel : %f\n", dtime); // printf(" \n --------------------------------------- \n"); // printSquareMatrixInt(MATRIX_SIZE, matrix); }
GB_binop__min_fp64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__min_fp64) // A.*B function (eWiseMult): GB (_AemultB_01__min_fp64) // A.*B function (eWiseMult): GB (_AemultB_02__min_fp64) // A.*B function (eWiseMult): GB (_AemultB_03__min_fp64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__min_fp64) // A*D function (colscale): GB (_AxD__min_fp64) // D*A function (rowscale): GB (_DxB__min_fp64) // C+=B function (dense accum): GB (_Cdense_accumB__min_fp64) // C+=b function (dense accum): GB (_Cdense_accumb__min_fp64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__min_fp64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__min_fp64) // C=scalar+B GB (_bind1st__min_fp64) // C=scalar+B' GB (_bind1st_tran__min_fp64) // C=A+scalar GB (_bind2nd__min_fp64) // C=A'+scalar GB (_bind2nd_tran__min_fp64) // C type: double // A type: double // B,b type: double // BinaryOp: cij = fmin (aij, bij) #define GB_ATYPE \ double #define GB_BTYPE \ double #define GB_CTYPE \ double // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical 
#define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ double aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ double bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ double t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = fmin (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MIN || GxB_NO_FP64 || GxB_NO_MIN_FP64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__min_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__min_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__min_fp64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__min_fp64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type double double bwork = (*((double *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__min_fp64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const 
int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__min_fp64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *restrict Cx = (double *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__min_fp64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__min_fp64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict 
C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__min_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__min_fp64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__min_fp64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__min_fp64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double *Cx = (double *) Cx_output ; double x = (*((double *) x_input)) ; double *Bx = (double *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 
; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; double bij = GBX (Bx, p, false) ; Cx [p] = fmin (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__min_fp64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; double *Cx = (double *) Cx_output ; double *Ax = (double *) Ax_input ; double y = (*((double *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; double aij = GBX (Ax, p, false) ; Cx [p] = fmin (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = fmin (x, aij) ; \ } GrB_Info GB (_bind1st_tran__min_fp64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ double #if GB_DISABLE return (GrB_NO_VALUE) ; #else double x = (*((const double *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ double } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ double aij = GBX (Ax, pA, false) ; \ Cx [pC] = fmin (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__min_fp64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else double y = (*((const double *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
3720.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "correlation.h" /* Array initialization. */ static void init_array (int m, int n, DATA_TYPE *float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n)) { int i, j; *float_n = 1.2; for (i = 0; i < m; i++) for (j = 0; j < n; j++) data[i][j] = ((DATA_TYPE) i*j) / M; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_correlation(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n), DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m), DATA_TYPE POLYBENCH_1D(mean,M,m), DATA_TYPE POLYBENCH_1D(stddev,M,m)) { int i, j, j1, j2; DATA_TYPE eps = 0.1f; #define sqrt_of_array_cell(x,j) sqrt(x[j]) #pragma scop /* Determine mean of column vectors of input data matrix */ #pragma omp parallel private(i, j, j2) num_threads(#P11) { #pragma omp parallel for schedule(static, 1) simd for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Determine standard deviations of column vectors of data matrix. 
*/ #pragma omp parallel for schedule(static, 1) simd for (j = 0; j < _PB_M; j++) { stddev[j] = 0.0; for (i = 0; i < _PB_N; i++) stddev[j] += (data[i][j] - mean[j]) * (data[i][j] - mean[j]); stddev[j] /= float_n; stddev[j] = sqrt_of_array_cell(stddev, j); /* The following in an inelegant but usual way to handle near-zero std. dev. values, which below would cause a zero- divide. */ stddev[j] = stddev[j] <= eps ? 1.0 : stddev[j]; } /* Center and reduce the column vectors. */ #pragma omp parallel for schedule(static, 1) simd for (i = 0; i < _PB_N; i++) { #pragma omp target teams distribute #p #p for (j = 0; j < _PB_M; j++) { data[i][j] -= mean[j]; data[i][j] /= sqrt(float_n) * stddev[j]; } } /* Calculate the m * m correlation matrix. */ #pragma omp parallel for schedule(static, 1) simd for (j1 = 0; j1 < _PB_M-1; j1++) { symmat[j1][j1] = 1.0; for (j2 = j1+1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += (data[i][j1] * data[i][j2]); symmat[j2][j1] = symmat[j1][j2]; } } } #pragma endscop symmat[_PB_M-1][_PB_M-1] = 1.0; } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n); POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m); POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m); POLYBENCH_1D_ARRAY_DECL(stddev,DATA_TYPE,M,m); /* Initialize array(s). */ init_array (m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_correlation (m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean), POLYBENCH_ARRAY(stddev)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. 
*/ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); POLYBENCH_FREE_ARRAY(stddev); return 0; }
kronecker.h
/**
 * @file kronecker.h
 * @author Nader KHAMMASSI - nader.khammassi@gmail.com
 * @date
 * @brief Lazy Kronecker-product views over 2/3 operators, plus specialized
 *        fast accessors for the common I (x) U (x) I gate layouts and
 *        matrix-vector products over such views.
 *
 * NOTE(review): this header has no include guard / #pragma once, and the
 * free functions at the bottom (printv, mulmv, mulmv_) are defined non-inline
 * in a header -- including it from more than one translation unit violates
 * the ODR. Also, kronecker::get and printv use print/println, whose macro
 * definitions are commented out below, so they must come from another qx
 * header -- confirm.
 */
#include <iostream>
#include <vector>
#include <complex>
#include <cstring>

#include "qx/core/linalg.h"

#ifdef USE_LIBDIVIDE
#include <libdivide.h>
#endif

//#define println(x) std::cout << x << std::endl
//#define print(x) std::cout << x

/**
 * type definition
 */
//typedef std::complex<double> complex_t;
typedef std::vector<qx::linalg::complex_t> row_t;
typedef std::vector<row_t> matrix_t;
typedef std::vector<qx::linalg::complex_t> vector_t;

namespace qx {
   namespace linalg {

      /**
       * kronecker operator interface: anything that exposes element access
       * get(i,j) and a (square) dimension size().
       */
      class kronecker_operator
      {
         public:
           virtual complex_t get(size_t i, size_t j) const = 0;
           virtual size_t size() const = 0;
      };

      /**
       * identity: n x n identity matrix (elements produced on the fly).
       */
      class identity : public kronecker_operator
      {
         public:
           identity(size_t n) : n(n), zero(0.0), one(1.0)
           {
           }

           inline complex_t get(size_t i, size_t j) const
           {
              return (i==j ? one : zero);
           }

           size_t size() const
           {
              return n;
           }

         private:
           size_t n;
           const complex_t zero;
           const complex_t one;
      };

      /**
       * unitary matrix: wraps an explicit n x n matrix.
       * Note: the matrix is stored by value (copied from the ctor argument).
       */
      class unitary_matrix : public kronecker_operator
      {
         public:
           unitary_matrix(size_t n, matrix_t& m) : n(n), m(m)
           {
           }

           inline complex_t get(size_t i, size_t j) const
           {
              return (m[i][j]);
           }

           size_t size() const
           {
              return n;
           }

         private:
           size_t n;
           matrix_t m;
      };

      /**
       * kronecker: lazy Kronecker product of two or three operators
       * (m3 may be NULL for the two-operator case). Element (i,j) of
       * m1 (x) m2 [(x) m3] is computed on demand from the factor elements.
       *
       * NOTE(review): the two-operator branch calls println(...) on every
       * access -- debug tracing that will be very slow if a definition of
       * println is in scope, and a compile error if it is not. Verify.
       */
      class kronecker
      {
         public:
           kronecker(kronecker_operator * m1, kronecker_operator * m2, kronecker_operator * m3=NULL) : m1(m1), m2(m2), m3(m3)
           {
           }

           inline complex_t get(size_t i, size_t j) const
           {
              if (!m3)
              {
                 size_t n1 = m1->size();
                 size_t n2 = m2->size();
                 complex_t c1 = m1->get(i/n2, j/n2);
                 complex_t c2 = m2->get(i%n2, j%n2);
                 // usleep((i+1)*500+(j+i)*500);
                 println("k.get(" << i << "," << j << ") : " << c1 << " * " << c2 << "(n1=" << n1 << ", n2=" << n2 << ")");
                 return (c1*c2);
              }
              else
              {
                 size_t n1 = m1->size();
                 size_t n2 = m2->size();
                 size_t n3 = m3->size();
                 complex_t c1 = m1->get(i/(n2*n3), j/(n2*n3));
                 complex_t c2 = m2->get((i/n3)%n2, (j/n3)%n2);
                 complex_t c3 = m3->get(i%n3, j%n3);
                 return (c1*c2*c3);
              }
           }

         private:
           kronecker_operator * m1;
           kronecker_operator * m2;
           kronecker_operator * m3;
      };

      /**
       * const
       */
      const static complex_t __c_zero__ = 0.0;
      const static complex_t __c_one__  = 1.0f;
      const static complex_t i_diag[]   = { 0.0, 1.0 };

#if 0
      /* Dead code: older cmatrix_t-based variants of the specialized
       * accessors, kept for reference. */
      /**
       * kronecker
       */
      class kronecker_ui
      {
         public:
           kronecker_ui(cmatrix_t& m, size_t nm, size_t ni) : m(m), nm(nm), ni(ni)
           {
           }

           inline complex_t get(size_t i, size_t j)
           {
              return m(i%nm,j%nm);
              /*
              complex_t& c1 = m(i%nm,j%nm); // U
              // usleep((i+1)*500+(j+i)*500);
              println("k_ui.get(" << i << "," << j << ") : " << c1 << " * " << c2 << "(nm=" << nm << ", ni=" << ni << ")");
              return ((i/nm) == (j/nm) ? c1 : __c_zero__);
              */
           }

           inline void get(size_t i, size_t j, complex_t& c)
           {
              complex_t& c1 = m(i%nm,j%nm); // U
              const complex_t& c2 = ((i/nm) == (j/nm) ? __c_one__ : __c_zero__); // I
              c = c1*c2;
           }

           cmatrix_t m;
         private:
           size_t nm;
           size_t ni;
      };

      /**
       * kronecker
       */
      class kronecker_iu
      {
         public:
           kronecker_iu(cmatrix_t& m, size_t nm, size_t ni) : m(m), nm(nm), ni(ni)
           {
           }

           inline complex_t get(size_t i, size_t j)
           {
              return m(i/ni,j/ni);
              /*
              complex_t& c1 = m(i/ni,j/ni); // U
              // usleep((i+1)*500+(j+i)*500);
              println("k_ui.get(" << i << "," << j << ") : " << c1 << " * " << c2 << "(nm=" << nm << ", ni=" << ni << ")");
              return ((i%nm) == (j%nm) ? c1 : __c_zero__);
              */
           }

           inline void get(size_t i, size_t j, complex_t& c)
           {
              complex_t& c1 = m(i/nm,j/nm); // U
              const complex_t& c2 = ((i%nm) == (j%nm) ? __c_one__ : __c_zero__); // I
              c = c1*c2;
           }

           cmatrix_t m;
         private:
           size_t nm;
           size_t ni;
      };

      /**
       * kronecker_iui
       */
      class kronecker_iui
      {
         public:
           kronecker_iui(cmatrix_t& m, size_t nm, size_t ni1, size_t ni2) : m(m), nm(nm), ni1(ni1), ni2(ni2)
           {
           }

           inline complex_t get(size_t i, size_t j)
           {
              return m((i/ni2)%nm,(j/ni2)%nm);
              /*
              complex_t& c = m((i/ni2)%nm,(j/ni2)%nm); // U
              bool i1 = (i/(nm*ni2)) == (j/(nm*ni2));
              bool i2 = ((i%ni2) == (j%ni2));
              return ((i1 && i2) ? c : __c_zero__);
              */
           }

           inline void get(size_t i, size_t j, complex_t& c)
           {
              complex_t& c1 = m(i%nm,j%nm); // U
              const complex_t& c2 = ((i/nm) == (j/nm) ? __c_one__ : __c_zero__); // I
              c = c1*c2;
           }

           cmatrix_t m;
         private:
           size_t nm;
           size_t ni1;
           size_t ni2;
      };
#endif

#define __mod_2(x) (x & 1)

      /**
       * kronecker: fast element access for U (x) I where U is a raw 2x2
       * matrix (the indexing below hard-codes nm == 2 via __mod_2).
       */
      class kronecker_ui
      {
         public:
           kronecker_ui(const complex_t * m, size_t nm, size_t ni) : m(m), nm(nm), ni(ni)
           {
           }

           inline complex_t get(size_t i, size_t j)
           {
              // return m(i%nm,j%nm);
              // return m[(i%2)*2+j%2];
              return m[__mod_2(i)*2+__mod_2(j)];
           }

           /*
           inline void get(size_t i, size_t j, complex_t& c)
           {
              complex_t& c1 = m(i%nm,j%nm); // U
              const complex_t& c2 = ((i/nm) == (j/nm) ? __c_one__ : __c_zero__); // I
              c = c1*c2;
           }
           */

           const complex_t * m;
         private:
           size_t nm;
           size_t ni;
      };

      /**
       * kronecker
       */
#if 0
      /* Dead code: libdivide-accelerated variant of kronecker_iu. */
      class kronecker_iu
      {
         public:
           kronecker_iu(const complex_t * m, size_t nm, size_t ni) : m(m), nm(nm), ni(ni), fast_ni(ni)
           {
           }

           inline complex_t get(uint64_t i, uint64_t j)
           {
              // return m(i/ni,j/ni);
              // return m[(i/ni)*2+(j/ni)];
              // return m[(i/fast_ni)*2+(j/fast_ni)];
              return m[(2*i+j)/fast_ni];
           }

           /*
           inline void get(size_t i, size_t j, complex_t& c)
           {
              complex_t& c1 = m(i/nm,j/nm); // U
              const complex_t& c2 = ((i%nm) == (j%nm) ? __c_one__ : __c_zero__); // I
              c = c1*c2;
           }
           */

           const complex_t * m;
         private:
           uint64_t nm;
           uint64_t ni;
           libdivide::divider<uint64_t> fast_ni;
      };
#endif

      /**
       * kronecker: I (x) U accessor.
       *
       * NOTE(review): this class cannot compile as written -- `m` is a
       * `const complex_t *`, yet get() uses function-call syntax
       * `m(i/ni,j/ni)` (and the second get() does the same), and the public
       * member is declared `cmatrix_t m;` while the constructor takes and
       * initializes it from a raw pointer. Presumably the intent, by analogy
       * with kronecker_ui/kronecker_iui above, is a 2x2 lookup such as
       * `m[(i/ni)*2 + (j/ni)]` -- must be fixed and confirmed against
       * callers before this header can build with this class instantiated.
       */
      class kronecker_iu
      {
         public:
           kronecker_iu(const complex_t * m, size_t nm, size_t ni) : m(m), nm(nm), ni(ni)
           {
           }

           inline complex_t get(size_t i, size_t j)
           {
              return m(i/ni,j/ni);
              /*
              complex_t& c1 = m(i/ni,j/ni); // U
              // usleep((i+1)*500+(j+i)*500);
              println("k_ui.get(" << i << "," << j << ") : " << c1 << " * " << c2 << "(nm=" << nm << ", ni=" << ni << ")");
              return ((i%nm) == (j%nm) ? c1 : __c_zero__);
              */
           }

           inline void get(size_t i, size_t j, complex_t& c)
           {
              complex_t& c1 = m(i/nm,j/nm); // U
              const complex_t& c2 = ((i%nm) == (j%nm) ? __c_one__ : __c_zero__); // I
              c = c1*c2;
           }

           cmatrix_t m;
         private:
           size_t nm;
           size_t ni;
      };

      /**
       * kronecker_iui: I (x) U (x) I accessor for a raw 2x2 U
       * (indexing hard-codes nm == 2 via __mod_2). When USE_LIBDIVIDE is
       * set, the division by ni2 goes through a precomputed libdivide
       * divider.
       */
      class kronecker_iui
      {
         public:
           kronecker_iui(const complex_t * m, size_t nm, size_t ni1, size_t ni2) : m(m), nm(nm), ni1(ni1), ni2(ni2)
#ifdef USE_LIBDIVIDE
              , fast_ni2(ni2)
#endif
           {
           }

           inline complex_t get(uint64_t i, uint64_t j)
           {
              // return m((i/ni2)%nm,(j/ni2)%nm);
              // return m[((i/ni2)%2)*2+(j/ni2)%2];
              // return m[__mod_2(i/ni2)*2+__mod_2((j/ni2))];
#ifdef USE_LIBDIVIDE
              return m[__mod_2(i/fast_ni2)*2+__mod_2((j/fast_ni2))];
#else
              return m[__mod_2(i/ni2)*2+__mod_2((j/ni2))];
#endif
           }

           /*
           inline void get(size_t i, size_t j, complex_t& c)
           {
              complex_t& c1 = m(i%nm,j%nm); // U
              const complex_t& c2 = ((i/nm) == (j/nm) ? __c_one__ : __c_zero__); // I
              c = c1*c2;
           }
           */

           const complex_t * m;
         private:
           uint64_t nm;
           uint64_t ni1;
           uint64_t ni2;
#ifdef USE_LIBDIVIDE
           libdivide::divider<uint64_t> fast_ni2;
#endif
      };

      /* Debug helper: print the real parts of a vector to the log. */
      void printv(cvector_t& v)
      {
         print("[ ");
         for (std::size_t i=0; i<v.size(); ++i)
            print(v[i].re << ", ");
            //print(v[i].real() << ", ");
         println(" ]");
      }

      /* r = K * v for a lazy kronecker view K; rows are parallelized
         with OpenMP. O(n^2) element accesses. */
      void mulmv(kronecker& k, cvector_t& v, cvector_t& r)
      {
         #pragma omp parallel for schedule(static)
         for (int64_t i=0; i<(int64_t)v.size(); i++)
         {
            complex_t s = 0.0;
            for (std::size_t j=0; j<v.size(); j++)
               s += v[j]*(k.get(i,j));
            r[i] = s;
         }
      }

      /**
       * to be tested for correctness
       * Variant of mulmv that skips multiply-accumulate for exactly-zero
       * kronecker elements.
       */
      void mulmv_(kronecker& k, cvector_t& v, cvector_t& r)
      {
         complex_t s = 0.;
         complex_t x = 0.;
         #pragma omp parallel for private(s,x) schedule(static)
         for (int64_t i=0; i<(int64_t)v.size(); i++)
         {
            s = 0;
            for (std::size_t j=0; j<v.size(); j++)
            {
               x = k.get(i,j);
               //if ((x.real() != 0) || (x.imag() != 0))
               if ((x.re != 0) || (x.im != 0))
                  s += v[j]*x;
            }
            r[i] = s;
         }
      }

      /* Blocked partial product: accumulates the [block_jb,block_je)
         column block into r[block_ib..block_ie). Note it starts from the
         existing r[i], so callers sum contributions across j-blocks. */
      void mulmv(kronecker& k, cvector_t& v, cvector_t& r, size_t block_ib, size_t block_ie, size_t block_jb, size_t block_je)
      {
         for (std::size_t i=block_ib; i<block_ie; i++)
         {
            complex_t s = r[i];
            for (std::size_t j=block_jb; j<block_je; j++)
               s += v[j]*(k.get(i,j));
            r[i] = s;
         }
      }

   } // namespace linalg
} // namespace qx
pooling_3x3.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#if defined(__ARM_NEON)

// Max pooling with a 3x3 window and stride 2, per channel, NEON-accelerated.
// Each output row reads three consecutive input rows (r0/r1/r2); the vector
// path produces 4 output pixels per iteration, the scalar tail handles the
// remainder. Padding is not handled here -- the caller is expected to have
// sized/padded bottom_blob so that 2*outw+1 <= w (TODO confirm at call site).
static void pooling3x3s2_max_neon(const Mat& bottom_blob, Mat& top_blob)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // After a row the three pointers have advanced 2*outw floats; this jump
    // (= 2*w - 2*outw) moves them to the start of the next pair of input
    // rows, since stride 2 consumes two input rows per output row.
    const int tailstep = w - 2*outw + w;

    #pragma omp parallel for
    for (int q=0; q<inch; q++)
    {
        const float* img0 = bottom_blob.channel(q);
        float* outptr = top_blob.channel(q);

        // Three input rows of the current 3x3 window.
        const float* r0 = img0;
        const float* r1 = img0 + w;
        const float* r2 = img0 + w*2;

        for (int i = 0; i < outh; i++)
        {
#if defined(__ARM_NEON)
            // nn = number of 4-output vector iterations, remain = scalar tail.
            int nn = outw >> 2;
            int remain = outw - (nn << 2);
#else
            int remain = outw;
#endif // __ARM_NEON

#if defined(__ARM_NEON)
#if __aarch64__
            // Vector loop: ld2 de-interleaves each row into even/odd column
            // lanes; fmax folds even|odd|next-even (via ext) per row, then
            // across the three rows, storing 4 maxima per iteration. The
            // final sub #32 rewinds the one extra load done past the loop.
            if (nn > 0)
            {
            asm volatile(
                "prfm pldl1keep, [%1, #256] \n"
                "ld2 {v0.4s, v1.4s}, [%1], #32 \n"
                "prfm pldl1keep, [%2, #256] \n"
                "ld2 {v2.4s, v3.4s}, [%2], #32 \n"
                "prfm pldl1keep, [%3, #256] \n"
                "ld2 {v4.4s, v5.4s}, [%3], #32 \n"
                "0: \n"
                "prfm pldl1keep, [%1, #256] \n"
                "ld2 {v6.4s, v7.4s}, [%1], #32 \n"
                "fmax v12.4s, v0.4s, v1.4s \n"
                "fmax v13.4s, v2.4s, v3.4s \n"
                "prfm pldl1keep, [%2, #256] \n"
                "ld2 {v8.4s, v9.4s}, [%2], #32 \n"
                "fmax v14.4s, v4.4s, v5.4s \n"
                "ext v0.16b, v0.16b, v6.16b, #4 \n"
                "prfm pldl1keep, [%3, #256] \n"
                "ld2 {v10.4s, v11.4s}, [%3], #32 \n"
                "ext v2.16b, v2.16b, v8.16b, #4 \n"
                "fmax v12.4s, v12.4s, v0.4s \n"
                "ext v4.16b, v4.16b, v10.16b, #4 \n"
                "fmax v13.4s, v13.4s, v2.4s \n"
                "fmax v14.4s, v14.4s, v4.4s \n"
                "fmax v12.4s, v12.4s, v13.4s \n"
                "orr v0.16b, v6.16b, v6.16b \n"
                "orr v1.16b, v7.16b, v7.16b \n"
                "fmax v12.4s, v12.4s, v14.4s \n"
                "orr v2.16b, v8.16b, v8.16b \n"
                "orr v3.16b, v9.16b, v9.16b \n"
                "orr v4.16b, v10.16b, v10.16b \n"
                "orr v5.16b, v11.16b, v11.16b \n"
                "subs %w0, %w0, #1 \n"
                "st1 {v12.4s}, [%4], #16 \n"
                "bne 0b \n"
                "sub %1, %1, #32 \n"
                "sub %2, %2, #32 \n"
                "sub %3, %3, #32 \n"
                : "=r"(nn), // %0
                  "=r"(r0), // %1
                  "=r"(r1), // %2
                  "=r"(r2), // %3
                  "=r"(outptr) // %4
                : "0"(nn),
                  "1"(r0),
                  "2"(r1),
                  "3"(r2),
                  "4"(outptr)
                : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14"
            );
            }
#else
            // ARMv7 version of the same scheme (vld2 de-interleave, vmax
            // folds, vext for the "+2" column, vorr as register moves).
            if (nn > 0)
            {
            asm volatile(
                "pld [%1, #256] \n"
                "vld2.f32 {d0-d3}, [%1]! \n"// q0 = 0 2 4 6 q1 = 1 3 5 7
                "pld [%2, #256] \n"
                "vld2.f32 {d4-d7}, [%2]! \n"
                "pld [%3, #256] \n"
                "vld2.f32 {d8-d11}, [%3]! \n"
                "0: \n"
                "pld [%1, #256] \n"
                "vld2.f32 {d12-d15}, [%1]! \n"// q6 = 8 10 12 14 q7 = 9 11 13 15
                "vmax.f32 q12, q0, q1 \n"
                "vmax.f32 q13, q2, q3 \n"
                "pld [%2, #256] \n"
                "vld2.f32 {d16-d19}, [%2]! \n"
                "vmax.f32 q14, q4, q5 \n"
                "vext.32 q0, q0, q6, #1 \n"
                "pld [%3, #256] \n"
                "vld2.f32 {d20-d23}, [%3]! \n"
                "vext.32 q2, q2, q8, #1 \n"
                "vmax.f32 q12, q12, q0 \n"
                "vext.32 q4, q4, q10, #1 \n"
                "vmax.f32 q13, q13, q2 \n"
                "vmax.f32 q14, q14, q4 \n"
                "vmax.f32 q12, q12, q13 \n"
                "vorr q0, q6, q6 \n"
                "vorr q1, q7, q7 \n"
                "vmax.f32 q12, q12, q14 \n"
                "vorr q2, q8, q8 \n"
                "vorr q3, q9, q9 \n"
                "vorr q4, q10, q10 \n"
                "vorr q5, q11, q11 \n"
                "subs %0, #1 \n"
                "vst1.f32 {d24-d25}, [%4]! \n"
                "bne 0b \n"
                "sub %1, #32 \n"
                "sub %2, #32 \n"
                "sub %3, #32 \n"
                : "=r"(nn), // %0
                  "=r"(r0), // %1
                  "=r"(r1), // %2
                  "=r"(r2), // %3
                  "=r"(outptr) // %4
                : "0"(nn),
                  "1"(r0),
                  "2"(r1),
                  "3"(r2),
                  "4"(outptr)
                : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14"
            );
            }
#endif // __aarch64__
#endif // __ARM_NEON
            // Scalar tail: plain 3x3 max over the three rows, stepping 2
            // columns per output pixel.
            for (; remain>0; remain--)
            {
                float max0 = std::max(std::max(r0[0], r0[1]), r0[2]);
                float max1 = std::max(std::max(r1[0], r1[1]), r1[2]);
                float max2 = std::max(std::max(r2[0], r2[1]), r2[2]);

                *outptr = std::max(std::max(max0, max1), max2);

                r0 += 2;
                r1 += 2;
                r2 += 2;
                outptr++;
            }

            r0 += tailstep;//1 + w;
            r1 += tailstep;//1 + w;
            r2 += tailstep;//1 + w;
        }
    }
}

#endif // __ARM_NEON
cc_fullycon.c
#include <stdio.h> #include "cc_assert.h" #include "cc_basic.h" #include "cc_conv2d.h" #include "cc_fmap2d.h" #include "cc_tsrmgr.h" #include "cc_fullycon.h" #include "global_fn_cfg.h" extern fn_array_dot_prod _array_dot_prod; cc_tensor_t *cc_fully_connected(const cc_tensor_t *inp, const cc_tensor_t *w, const cc_tensor_t *b, const char *name) { cc_ssize i, mmsize, dtsize; cc_tensor_t *oup = NULL; cc_ssize shape[CC_CNN2D_SHAPE] = {0}; #ifdef ENABLE_CC_ASSERT cc_assert((cc_dimension(w) == CC_CONV2D_KERNEL_DIM) || (cc_dimension(w) == CC_FULLYCON_KERNEL_DIM)); cc_assert_zero(cc_dimension(inp) - CC_CNN2D_DIM); cc_assert_zero(*inp->dtype - *w->dtype); cc_assert_zero(*inp->dtype - *b->dtype); cc_assert_zero(inp->shape[CC_CNN2D_SHAPE_C] - w->shape[CC_CONV2D_KERNEL_I]); #endif #ifdef AUTO_TSRMGR oup = cc_tsrmgr_get(name); #endif if (!oup) { shape[CC_CNN2D_SHAPE_C] = w->shape[CC_CONV2D_KERNEL_O]; shape[CC_CNN2D_SHAPE_H] = 1; shape[CC_CNN2D_SHAPE_W] = 1; oup = cc_create(shape, *inp->dtype, name); } dtsize = cc_dtype_size(*inp->dtype); mmsize = inp->shape[CC_CNN2D_SHAPE_C] * dtsize; #ifdef ENABLE_OPENMP #pragma omp parallel for private(i) #endif for (i = 0; i < w->shape[CC_CONV2D_KERNEL_O]; ++i) { _array_dot_prod(inp->data, w->data + i * mmsize, inp->shape[CC_CNN2D_SHAPE_C], oup->data + i * dtsize, *inp->dtype); } if (b) oup = cc_fmap2d_bias(oup, b, oup->name); return oup; }
GB_binop__pair_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__pair_uint16) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__pair_uint16) // A.*B function (eWiseMult): GB (_AemultB_03__pair_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__pair_uint16) // A*D function (colscale): GB (_AxD__pair_uint16) // D*A function (rowscale): GB (_DxB__pair_uint16) // C+=B function (dense accum): GB (_Cdense_accumB__pair_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__pair_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_uint16) // C=scalar+B GB ((none)) // C=scalar+B' GB ((none)) // C=A+scalar GB ((none)) // C=A'+scalar GB ((none)) // C type: uint16_t // A type: uint16_t // B,b type: uint16_t // BinaryOp: cij = 1 #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; // bij = Bx [pB] 
#define GB_GETB(bij,Bx,pB) \ ; // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = 1 ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PAIR || GxB_NO_UINT16 || GxB_NO_PAIR_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__pair_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__pair_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__pair_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__pair_uint16) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const 
int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    // NOTE(review): tail of GB (_AxD__pair_uint16)-style colscale kernel whose
    // signature begins above this chunk.  This whole file appears to be
    // auto-generated (Generated/ folder convention) — edit the generator,
    // not this file.  The PAIR operator always produces 1, hence Cx [p] = 1
    // in the disabled kernels below.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__pair_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__pair_uint16)
(
    GrB_Matrix C, const int C_sparsity,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace for slicing M, A, and B; freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__pair_uint16)
(
    GrB_Matrix C, const int C_sparsity, const int ewise_method,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__pair_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__pair_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M, const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__pair_uint16)
(
    GrB_Matrix C, const int ewise_method,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// NOTE(review): this kernel and the three that follow are compiled out
// (#if 0) — presumably the generator omits bind1st/bind2nd/transpose
// variants for PAIR because the result is the constant 1; confirm against
// the generator script before re-enabling.
#if 0
GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        // the empty statements below are the expansion of empty GETB/op macros
        ;
        ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

#if 0
GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        ;
        ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// C = op (x, A'):  transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)   \
{                           \
    ;  ;                    \
    Cx [pC] = 1 ;           \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for subsequent kernels
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint16_t
}
#endif

//------------------------------------------------------------------------------
// C = op (A', y):  transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)   \
{                           \
    ;  ;                    \
    Cx [pC] = 1 ;           \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif

#endif
GB_unop__identity_fc64_uint64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_fc64_uint64
// op(A') function:  GB_unop_tran__identity_fc64_uint64

// C type:   GxB_FC64_t
// A type:   uint64_t
// cast:     GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop:  cij = aij

// type of A
#define GB_ATYPE \
    uint64_t

// type of C
#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting (uint64 -> double complex: real part only, imag part 0)
#define GB_CAST(z, aij) \
    GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    uint64_t aij = Ax [pA] ;        \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
    Cx [pC] = z ;                   \
}

// true if operator is the identity op with no typecasting
// (false here: uint64 must be typecast to double complex)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__identity_fc64_uint64
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const uint64_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is sparse, hyper, or full: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (uint64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint64_t aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint64_t aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__identity_fc64_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
Searching.202008062049.computation_quota.profile.h
//
// Created by Zhen Peng on 8/6/2020.
//

#ifndef BATCH_SEARCHING_SEARCHING_H
#define BATCH_SEARCHING_SEARCHING_H

#include <vector>
#include <boost/dynamic_bitset.hpp>
//#include <boost/sort/sort.hpp>
#include <iostream>
#include <fstream>
#include <unordered_map>
#include <immintrin.h>
#include <cstring>
#include <unordered_set>
#include <set>
#include <cfloat>
#include <algorithm>
//#include <omp.h>
#include "../include/definitions.h"
//#include "../include/efanna2e/neighbor.h"
#include "../include/utils.h"
#include "../include/Candidate.h"
#include "../include/parallelization.h"
#include "../include/bitvector.h"

namespace PANNS {

/**
 * In-memory NSG index and the (sequential and parallel) best-first search
 * routines that run on it.  The graph plus vectors are packed into one flat
 * byte array (opt_nsg_graph_): per vertex [norm | data | degree | neighbors].
 */
class Searching {
//private:
public:
    idi num_v_ = 0;             // number of vertices (data points)
    edgei num_e_ = 0;           // number of edges in the NSG graph
    idi num_queries_ = 0;       // number of query vectors
    uint64_t dimension_ = 0;    // vector dimensionality (shared by data and queries)
    idi width_ = 0;             // NSG largest degree
    idi ep_ = 0;                // Start point
//    std::vector<dataf> data_load_;
//    std::vector<dataf> queries_load_;
//    std::vector< std::vector<dataf> > data_load_;
//    std::vector< std::vector<dataf> > queries_load_;
//    std::vector<distf> norms_;
    dataf *data_load_ = nullptr;        // raw data vectors; freed (and nulled) by load_nsg_graph()
    dataf *queries_load_ = nullptr;     // raw query vectors; owned, freed in destructor
//    dataf *norms_;
//    std::vector< std::vector<idi> > nsg_graph_;
//    idi *nsg_graph_indices_;
//    idi *nsg_graph_out_edges_;
//    std::vector< std::vector<idi> > edge_list_;
    char *opt_nsg_graph_ = nullptr;     // packed [norm|data|degree|neighbors] per vertex
    uint64_t data_bytes_;               // bytes of the [norm|data] part of one vertex
    uint64_t neighbor_bytes_;           // bytes of the [degree|neighbors] part of one vertex
    uint64_t vertex_bytes_;             // data_bytes_ + neighbor_bytes_

    // For multithreads
    int num_threads_ = 1;
//    int num_real_threads_ = 1;
//    int num_threads_intra_query_ = 1;
//    int num_threads_inter_query_ = 1;
    // NOTE(review): "compuation" is a typo in the member name, but it is part
    // of the public interface — renaming would break callers.
    uint64_t thread_compuation_quota_ = 0;      // per-thread distance-computation budget
    std::vector<uint64_t> threads_computations_; // per-thread computation counters

    // Squared L2 norm of one vector (AVX; see definition below).
    dataf compute_norm(
            const dataf *data) const;
//        idi vertex_id);
//        const std::vector<PANNS::dataf> &data);
//        size_t loc_start,
//        idi dimension)

    // Distance via  -2 * <v, q> + ||v||^2  (query-norm term dropped; see definition).
    dataf compute_distance_with_norm(
            const dataf *v_data,
            const dataf *q_data,
//            idi vertex_id,
//            idi query_id,
//            const std::vector<dataf> &d_data,
//            const std::vector<dataf> &q_data,
//            PANNS::idi d_start,
//            PANNS::idi q_start,
            const dataf vertex_norm) const;

    // Sorted insert that grows the queue; returns insert position or
    // queue_capacity when the candidate is dropped (duplicate or past the end
    // of a full queue).
    static idi add_into_queue(
            std::vector<PANNS::Candidate> &queue,
            const idi queue_start,
            idi &queue_size,
            const idi queue_capacity,
            const PANNS::Candidate &cand);

    static void add_into_queue_at(
            const Candidate &cand,
            std::vector<Candidate> &queue,
            const idi insert_index, // The insertion location, independent with queue_start
            const idi queue_start,
            idi &queue_top, // The number of elements in queue, independent with queue_start
            const idi queue_size); // The maximum capacity of queue, independent with queue_start.

    static void insert_one_element_at(
//        const T &cand,
//        T *queue_base,
            const Candidate &cand,
            std::vector<Candidate> &queue_base,
            const idi insert_index,
            const idi queue_start,
            const idi queue_size);

    static idi merge_two_queues_into_1st_queue_seq_fixed(
            std::vector<Candidate> &queue1,
            const idi queue1_start,
            const idi queue1_size,
            std::vector<Candidate> &queue2,
            const idi queue2_start,
            const idi queue2_size);

    static idi merge_two_queues_into_1st_queue_seq_incr(
            std::vector<Candidate> &queue1,
            const idi queue1_start,
            idi &queue1_size, // The number of element in queue1, independent with queue1_start.
            const idi queue1_length, // The maximum capacity of queue1, independent with queue1_start.
            std::vector<Candidate> &queue2,
            const idi queue2_start,
            const idi queue2_size);

    idi merge_all_queues_para_array(
            std::vector<Candidate> &set_L,
//            std::vector<Candidate> &local_queues_array,
            std::vector<idi> &local_queues_ends,
            const idi local_queue_length,
//            std::vector<Candidate> &set_L,
            const idi L);

    idi merge_queues_of_four(
            std::vector<Candidate> &set_L,
            const std::vector<idi> &local_queues_starts,
            std::vector<idi> &local_queues_sizes,
            const idi group_id,
            const idi local_queue_capacity,
            const idi master_queue_capacity);

    idi merge_all_queues_to_master(
            std::vector<Candidate> &set_L,
            const std::vector<idi> &local_queues_starts,
            std::vector<idi> &local_queues_sizes,
            const idi local_queue_capacity,
            const idi local_master_queue_capacity,
            const idi master_queue_capacity,
            const idi group_size);

    idi master_top_m_to_groups(
            std::vector<Candidate> &set_L,
            const std::vector<idi> &local_queues_starts,
            std::vector<idi> &local_queues_sizes,
            std::vector<idi> &top_m_candidates,
            const std::vector<idi> &top_m_candidates_starts,
            std::vector<idi> &top_m_candidates_sizes,
            const idi k_uc,
            idi &last_k,
            const idi M,
            const idi num_groups);
//            const idi group_size);

public:
    // For Profiling
//    L3CacheMissRate cache_miss_kernel;
    uint64_t count_distance_computation_ = 0;
    uint64_t count_full_merge_ = 0;
//    uint64_t count_add_to_queue_ = 0;
//    uint64_t count_single_query_computation_ = 0;
//    distf dist_min_ = 0;
//    distf dist_max_ = 0;
//    double time_merge_ = 0;
    double time_gather_ = 0;
    double time_move_top_m_ = 0;
    double time_full_merge_ = 0;
//    double time_select_ = 0;
//    double time_select_L_ = 0.0;
//    double time_select_M_ = 0.0;
//    double time_initialization_ = 0;
//    double time_sequential_phase_ = 0;
//    double time_parallel_phase_ = 0;
//    double time_ending_ = 0.0;
//    double time_assign_s_ = 0.0;
//    double time_expand_ = 0.0;
//    double time_pick_top_m_ = 0.0;
//    double time_distance_computation_ = 0.0;
//    double time_add_to_queue_ = 0.0;
//    double time_insert_ = 0;
//    double time_compare_minimum_ = 0;
//    double time_memmove_ = 0;
//    std::vector<double> time_memmove_list_;
//    L3CacheMissRate profile_miss_rate;
//    uint64_t number_local_elements_ = 0;
//    std::vector<idi> L_ids_;
//    std::vector<idi> M_ids_;

    // Frees the malloc'd buffers; pointers are nulled so double-destruction
    // (or destruction after load_nsg_graph already freed data_load_) is safe.
    ~Searching() {
        free(data_load_);
        data_load_ = nullptr;
//        free(queries_load_);
//        _mm_free(data_load_);
        free(queries_load_);
        queries_load_ = nullptr;
//        free(norms_);
//        free(nsg_graph_indices_);
//        free(nsg_graph_out_edges_);
        free(opt_nsg_graph_);
        opt_nsg_graph_ = nullptr;
    }
    void load_data_load(char *filename);
    void load_queries_load(char *filename);
    void load_nsg_graph(char *filename);
//    void build_opt_graph();
    void prepare_init_ids(
            std::vector<unsigned> &init_ids,
            const unsigned L) const;
    void subsearch_with_top_m(
            const idi value_M_max,
            const idi query_id,
            const idi local_L,
            std::vector<Candidate> &set_L,
            const idi set_L_start,
            idi &set_L_size,
            std::vector<idi> &local_top_m_candidates,
            boost::dynamic_bitset<> &is_visited,
            uint64_t &local_count_distance_computation);
    void subsearch_top_m_for_one_iteration(
            const idi iter,
            idi &k_uc,
            const idi value_M,
            const idi query_id,
            const dataf *query_data,
            const idi L,
            std::vector<Candidate> &set_L,
            const idi set_L_start,
            idi &set_L_size,
            std::vector<idi> &top_m_candidates,
            boost::dynamic_bitset<> &is_visited,
            uint64_t &count_distance_computation);
    void seq_search_with_top_m_double_m(
            const idi M_max,
            const idi query_id,
            const idi K,
            const idi global_L,
            std::vector<Candidate> &set_L,
            const std::vector<idi> &init_ids,
            std::vector<idi> &set_K);
//            std::vector<idi> &top_m_candidates,
//            boost::dynamic_bitset<> &is_visited);
    idi expand_one_candidate(
            idi cand_id,
            const dataf *query_data,
            const distf &dist_bound,
            std::vector<Candidate> &set_L,
            const idi local_queue_start,
            idi &local_queue_size,
            const idi &local_queue_capacity,
            boost::dynamic_bitset<> &is_visited,
            uint64_t &local_count_computation);
    void para_search_with_top_m_hierarchy_merge_v1(
            const idi value_M_middle,
            const idi value_M_max,
            const idi query_id,
            const idi K,
            const idi L,
            std::vector<Candidate> &set_L,
            const std::vector<idi> &init_ids,
            std::vector<idi> &set_K,
            const idi local_queue_capacity, // Maximum size of local queue
            const idi local_master_queue_capacity,
            const std::vector<idi> &local_queues_starts,
            std::vector<idi> &local_queues_sizes, // Sizes of local queue
//            std::vector< std::vector<idi> > &top_m_candidates_list, // every group has one top-M queue
            std::vector<idi> &top_m_candidate,
            const std::vector<idi> &top_m_candidates_starts,
            std::vector<idi> &top_m_candidates_sizes,
            boost::dynamic_bitset<> &is_visited,
            const idi group_size, // Should be 4
            const idi full_merge_freq);
    void para_search_with_top_m_less_sync_v0(
            const idi value_M_middle,
            const idi value_M_max,
            const idi query_id,
            const idi K,
            const idi L,
            std::vector<Candidate> &set_L,
            const std::vector<idi> &init_ids,
            std::vector<idi> &set_K,
            const idi local_queue_capacity, // Maximum size of local queue
            const std::vector<idi> &local_queues_starts,
            std::vector<idi> &local_queues_sizes, // Sizes of local queue
            std::vector<idi> &top_m_candidates,
            boost::dynamic_bitset<> &is_visited,
            const idi full_merge_freq,
            const idi local_iter_bound);
    void load_true_NN(
            const char *filename,
            std::vector< std::vector<idi> > &true_nn_list);
    void get_recall_for_all_queries(
            const std::vector< std::vector<idi> > &true_nn_list,
            const std::vector<std::vector<unsigned>> &set_K_list,
            std::unordered_map<unsigned, double> &recalls) const;
}; // Class Searching

/**
 * Input the data from the file.
 * Also cross-checks the dimension against a previously loaded query file
 * (a non-zero dimension_ means the other file was loaded first).
 * @param filename
 */
inline void Searching::load_data_load(char *filename)
{
    auto old_d = dimension_;
    // NOTE(review): DiskIO::load_data is project code — presumably it
    // allocates data_load_ and fills num_v_/dimension_; confirm in utils.h.
    DiskIO::load_data(
            filename,
            data_load_,
            num_v_,
            dimension_);
    if (old_d) {
        if (old_d != dimension_) {
            std::cerr << "Error: data dimension " << dimension_
                      << " is not equal to query dimension " << old_d
                      << "." << std::endl;
            exit(EXIT_FAILURE);
        }
    }
}

/**
 * Input queries from the file.
* @param filename
 * Also cross-checks the dimension against a previously loaded data file
 * (a non-zero dimension_ means the other file was loaded first).
 */
inline void Searching::load_queries_load(char *filename)
{
    auto old_d = dimension_;
    DiskIO::load_data(
            filename,
            queries_load_,
            num_queries_,
            dimension_);
    if (old_d) {
        if (old_d != dimension_) {
            std::cerr << "Error: query dimension " << dimension_
                      << " is not equal to data dimension " << old_d
                      << "." << std::endl;
            exit(EXIT_FAILURE);
        }
    }
}

/**
 * Input the NSG graph from the file.
 * Reference: https://github.com/ZJULearning/nsg/blob/master/src/index_nsg.cpp
 * Builds the packed per-vertex layout [norm | data | degree | neighbors] in
 * opt_nsg_graph_ and then frees the now-redundant data_load_ array.
 * Requires load_data_load() to have run first (uses num_v_, dimension_,
 * data_load_).
 * @param filename
 */
inline void Searching::load_nsg_graph(char *filename)
{
    std::ifstream fin(filename);
    if (!fin.is_open()) {
        std::cerr << "Error: cannot read file " << filename << " ." << std::endl;
        exit(EXIT_FAILURE);
    }
    // File header: width (max degree) then enter point, both 4-byte unsigned.
    fin.read(reinterpret_cast<char *>(&width_), sizeof(unsigned));
    fin.read(reinterpret_cast<char *>(&ep_), sizeof(unsigned));

    // "1 +" reserves a leading slot: the norm before the data, the degree
    // before the neighbor list.
    data_bytes_ = (1 + dimension_) * sizeof(dataf);
    neighbor_bytes_ = (1 + width_) * sizeof(idi);
    vertex_bytes_ = data_bytes_ + neighbor_bytes_;
    opt_nsg_graph_ = (char *) malloc(num_v_ * vertex_bytes_);
    if (!opt_nsg_graph_) {
        std::cerr << "Error: no enough memory for opt_nsg_graph_."
                  << std::endl;
        exit(EXIT_FAILURE);
    }
    idi v_id = 0;
    num_e_ = 0;
    char *base_location = opt_nsg_graph_;
    while (true) {
        idi degree;
        fin.read(reinterpret_cast<char *>(&degree), sizeof(unsigned));
        if (fin.eof()) {
            break;
        }
        num_e_ += degree;
//        std::vector<idi> tmp_ngbrs(degree);
//        fin.read(reinterpret_cast<char *>(tmp_ngbrs.data()), degree * sizeof(unsigned));

        // Norm and data
        distf norm = compute_norm(data_load_ + v_id * dimension_);
//        distf norm = compute_norm(v_id);
        std::memcpy(base_location, &norm, sizeof(distf)); // Norm
        memcpy(base_location + sizeof(distf), data_load_ + v_id * dimension_, dimension_ * sizeof(dataf)); // Data
        base_location += data_bytes_;

        // Neighbors
        memcpy(base_location, &degree, sizeof(idi)); // Number of neighbors
        fin.read(base_location + sizeof(idi), degree * sizeof(unsigned)); // Neighbors
//        memcpy(location + sizeof(idi), tmp_ngbrs.data(), degree * sizeof(unsigned));
        base_location += neighbor_bytes_;
        ++v_id;
    }
    if (v_id != num_v_) {
        std::cerr << "Error: NSG data has " << v_id
                  << " vertices, but origin data has " << num_v_ << " vertices." << std::endl;
        exit(EXIT_FAILURE);
    }
    // Vectors now live inside opt_nsg_graph_; the flat copy is redundant.
    free(data_load_);
    data_load_ = nullptr;
//    ////////////////////////
//    idi v_id = 0;
//    num_e_ = 0;
//    while (true) {
//        idi degree;
//        fin.read(reinterpret_cast<char *>(&degree), sizeof(unsigned));
//        if (fin.eof()) {
//            break;
//        }
//        num_e_ += degree;
//
//        std::vector<idi> ngbrs(degree);
//        fin.read(reinterpret_cast<char *>(ngbrs.data()), degree * sizeof(unsigned));
////        nsg_graph_.push_back(ngbrs);
////        tmp_edge_list.push_back(ngbrs);
//        edge_list_.push_back(ngbrs);
//        ++v_id;
//    }
//    if (v_id != num_v_) {
//        std::cerr << "Error: NSG data has " << v_id
//                  << " vertices, but origin data has " << num_v_ << " vertices."
//                  << std::endl;
//        exit(EXIT_FAILURE);
//    }
}

/**
 * Load those true top-K neighbors (ground truth) of queries
 * Binary layout: query count, K, then per query K pairs of (id, dist);
 * the distances are read but discarded.
 * @param filename
 * @param[out] true_nn_list
 */
inline void Searching::load_true_NN(
        const char *filename,
        std::vector< std::vector<idi> > &true_nn_list)
//        unsigned &t_K)
{
    std::ifstream fin(filename);
    if (!fin.is_open()) {
        fprintf(stderr, "Error: cannot open file %s\n", filename);
        exit(EXIT_FAILURE);
    }
    idi t_query_num;
    idi t_K;
//    unsigned t_K;
    fin.read(reinterpret_cast<char *>(&t_query_num), sizeof(t_query_num));
    fin.read(reinterpret_cast<char *>(&t_K), sizeof(t_K));
//    if (t_query_num != query_num) {
//        fprintf(stderr, "Error: query_num %u is not equal to the record %u in true-NN file %s\n",
//                query_num, t_query_num, filename);
//        exit(EXIT_FAILURE);
//    }
    if (t_query_num < num_queries_) {
        fprintf(stderr, "Error: t_query_num %u is smaller than num_queries_ %u\n", t_query_num, num_queries_);
        exit(EXIT_FAILURE);
    }
    // get_recall_for_all_queries() reads exactly 100 true neighbors per query.
    if (t_K < 100) {
        fprintf(stderr, "Error: t_K %u is smaller than 100.\n", t_K);
        exit(EXIT_FAILURE);
    }

//    data = new unsigned[(size_t) t_query_num * (size_t) t_K];
    true_nn_list.resize(t_query_num);
    for (idi q_i = 0; q_i < t_query_num; ++q_i) {
        true_nn_list[q_i].resize(t_K);
    }

    for (unsigned q_i = 0; q_i < t_query_num; ++q_i) {
//        size_t offset = q_i * t_K;
        for (unsigned n_i = 0; n_i < t_K; ++n_i) {
            unsigned id;
            float dist;
            fin.read(reinterpret_cast<char *>(&id), sizeof(id));
            fin.read(reinterpret_cast<char *>(&dist), sizeof(dist));
//            data[offset + n_i] = id;
            true_nn_list[q_i][n_i] = id;
        }
    }

    fin.close();
}

/**
 * Recall@{1,5,10,20,50,100} over all queries: for each cutoff n, the fraction
 * of the true top-n that appears in the first n returned candidates,
 * averaged over queries (hence the /= n * num_queries_ normalization).
 * Requires at least 100 entries per query in both lists.
 */
inline void Searching::get_recall_for_all_queries(
        const std::vector< std::vector<idi> > &true_nn_list,
        const std::vector<std::vector<unsigned>> &set_K_list,
        std::unordered_map<unsigned, double> &recalls) const
{
//    if (t_K < 100) {
//        fprintf(stderr, "Error: t_K %u is smaller than 100.\n", t_K);
//        exit(EXIT_FAILURE);
//    }
    if (true_nn_list[0].size() < 100) {
        fprintf(stderr, "Error: Number of true nearest neighbors of a query is smaller than 100.\n");
        exit(EXIT_FAILURE);
    }
    recalls[1] = 0.0;
    recalls[5] = 0.0;
    recalls[10] = 0.0;
    recalls[20] = 0.0;
    recalls[50] = 0.0;
    recalls[100] = 0.0;
    for (unsigned q_i = 0; q_i < num_queries_; ++q_i) {
//        size_t offset = q_i * t_K;
        for (unsigned top_i = 0; top_i < 100; ++top_i) {
            unsigned true_id = true_nn_list[q_i][top_i];
            for (unsigned n_i = 0; n_i < 100; ++n_i) {
                if (set_K_list[q_i][n_i] == true_id) {
                    if (n_i < 1) recalls[1] += 1;
                    if (n_i < 5) recalls[5] += 1;
                    if (n_i < 10) recalls[10] += 1;
                    if (n_i < 20) recalls[20] += 1;
                    if (n_i < 50) recalls[50] += 1;
                    if (n_i < 100) recalls[100] += 1;
                }
            }
        }
    }
    recalls[1] /= 1.0 * num_queries_;
    recalls[5] /= 5.0 * num_queries_;
    recalls[10] /= 10.0 * num_queries_;
    recalls[20] /= 20.0 * num_queries_;
    recalls[50] /= 50.0 * num_queries_;
    recalls[100] /= 100.0 * num_queries_;
}

/**
 * Prepare init_ids and flags, as they are constant for all queries.
 * Seeds the candidate pool with ep_'s (deduplicated) neighbors, then pads
 * deterministically with vertex ids starting at ep_ + 1 until L ids exist.
 * @param[out] init_ids
 * @param L
 */
inline void Searching::prepare_init_ids(
        std::vector<unsigned int> &init_ids,
        const unsigned L) const
{
//    idi num_ngbrs = get_out_degree(ep_);
//    edgei edge_start = nsg_graph_indices_[ep_];
//    // Store ep_'s neighbors as candidates
//    idi tmp_l = 0;
//    for (; tmp_l < L && tmp_l < num_ngbrs; tmp_l++) {
//        init_ids[tmp_l] = nsg_graph_out_edges_[edge_start + tmp_l];
//    }
//    std::unordered_set<idi> visited_ids;
    boost::dynamic_bitset<> is_selected(num_v_);
    // Neighbor list of ep_ inside the packed layout: first idi is the degree.
    idi *out_edges = (idi *) (opt_nsg_graph_ + ep_ * vertex_bytes_ + data_bytes_);
    idi out_degree = *out_edges++;
    idi init_ids_end = 0;
//    for (; tmp_l < L && tmp_l < out_degree; tmp_l++) {
    for (idi e_i = 0; e_i < out_degree && init_ids_end < L; ++e_i) {
//        idi v_id = out_edges[tmp_l];
        idi v_id = out_edges[e_i];
        if(is_selected[v_id]) {
            continue;
        }
        is_selected[v_id] = true;
//        init_ids[tmp_l] = v_id;
        init_ids[init_ids_end++] = v_id;
//        init_ids[tmp_l] = out_edges[tmp_l];
//        visited_ids.insert(init_ids[tmp_l]);
    }

//    for (idi i = 0; i < tmp_l; ++i) {
//        is_visited[init_ids[i]] = true;
//    }

    // If ep_'s neighbors are not enough, add other random vertices
    idi tmp_id = ep_ + 1; // use tmp_id to replace rand().
    while (init_ids_end < L) {
        tmp_id %= num_v_;
        idi v_id = tmp_id++;
        if (is_selected[v_id]) {
            continue;
        }
//        if (visited_ids.find(id) != visited_ids.end()) {
//            continue;
//        }
        is_selected[v_id] = true;
//        visited_ids.insert(id);
        init_ids[init_ids_end++] = v_id;
//        tmp_l++;
    }
}

// TODO: re-code in AVX-512
/**
 * Squared L2 norm of one vector, 8 floats at a time with AVX.
 * NOTE(review): D rounds dimension_ up to a multiple of 8 and the remainder
 * load at e_l reads 8 floats starting at DD — so up to D floats are read
 * even though only dimension_ are owned; presumably the buffers are padded
 * (loadu is used), but confirm against DiskIO/opt_nsg_graph_ allocation.
 */
inline dataf Searching::compute_norm(
        const dataf *data) const
//        idi vertex_id)
//        const std::vector<PANNS::dataf> &data)
//        size_t loc_start,
//        idi dimension)
{
//    const dataf *a = data.data() + loc_start;
//    const dataf *a = data_load_ + vertex_id * dimension_;
//    idi size = dimension_;
    dataf result = 0;
//#define AVX_L2NORM(addr, dest, tmp) \
//    tmp = _mm256_load_ps(addr); \
//    tmp = _mm256_mul_ps(tmp, tmp); \
//    dest = _mm256_add_ps(dest, tmp);
#define AVX_L2NORM(addr, dest, tmp) \
    tmp = _mm256_loadu_ps(addr); \
    tmp = _mm256_mul_ps(tmp, tmp); \
    dest = _mm256_add_ps(dest, tmp);

    __m256 sum;
    __m256 l0, l1;
    unsigned D = (dimension_ + 7) & ~7U;    // dimension_ rounded up to x8
    unsigned DR = D % 16;                   // leftover after 16-wide steps
    unsigned DD = D - DR;
    const float *l = data;
    const float *e_l = l + DD;
    float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0};

    sum = _mm256_load_ps(unpack);
//    sum = _mm256_loadu_ps(unpack);
    if (DR) { AVX_L2NORM(e_l, sum, l0); }
    for (unsigned i = 0; i < DD; i += 16, l += 16) {
        AVX_L2NORM(l, sum, l0);
        AVX_L2NORM(l + 8, sum, l1);
    }
    _mm256_store_ps(unpack, sum);
//    _mm256_storeu_ps(unpack, sum);
    result = unpack[0] + unpack[1] + unpack[2] + unpack[3]
             + unpack[4] + unpack[5] + unpack[6] + unpack[7];

    return result;
}

/**
 * Distance surrogate:  -2 * <v, q> + ||v||^2.
 * The query's own norm is a per-query constant, so dropping it preserves the
 * ordering of candidates for one query (values can be negative).
 */
inline dataf Searching::compute_distance_with_norm(
        const dataf *v_data,
        const dataf *q_data,
//        idi vertex_id,
//        idi query_id,
//        const std::vector<PANNS::dataf> &d_data,
//        const std::vector<PANNS::dataf> &q_data,
//        PANNS::idi d_start,
//        PANNS::idi q_start,
        const dataf vertex_norm) const
//        idi dimension)
{
//    idi size = dimension_;
    float result = 0;
//#define AVX_DOT(addr1, addr2,
const PANNS::Candidate &cand)
{
    if (0 == queue_size) {
        queue[queue_start + queue_size++] = cand;
        return 0;
    }
    idi queue_end = queue_start + queue_size;
    // Find the insert location
    const auto it_loc = std::lower_bound(queue.begin() + queue_start, queue.begin() + queue_end, cand);
//    auto it_loc = std::lower_bound(queue.begin(), queue.begin() + queue_size, cand);
    idi insert_loc = it_loc - queue.begin();

    if (insert_loc != queue_end) {
        // NOTE(review): lower_bound orders by Candidate's operator< (declared
        // in Candidate.h — presumably by distance); comparing only the single
        // id_ at it_loc assumes equal-distance duplicates are adjacent and
        // unique — confirm against Candidate's comparator.
        if (cand.id_ == it_loc->id_) {
            // Duplicate: report "not inserted" via the queue_capacity sentinel.
            return queue_capacity;
        }
        if (queue_size >= queue_capacity) { // Queue is full
            // Drop the current worst element to make room.
            --queue_size;
            --queue_end;
        }
    } else { // insert_loc == queue_end, insert at the end?
        if (queue_size < queue_capacity) { // Queue is not full
            // Insert at the end
            queue[insert_loc] = cand;
            ++queue_size;
            return queue_size - 1;
        } else { // Queue is full
            return queue_capacity;
        }
    }

    // Add into queue: shift the tail right by one slot, then write cand.
    memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1),
            reinterpret_cast<char *>(queue.data() + insert_loc),
            (queue_end - insert_loc) * sizeof(Candidate));
    queue[insert_loc] = cand;
    ++queue_size;
    return insert_loc - queue_start;
}

/**
 * Insert cand at a pre-computed slot, shifting the tail right; if the queue
 * is already at capacity the last element is dropped first.
 */
inline void Searching::add_into_queue_at(
        const Candidate &cand,
        std::vector<Candidate> &queue,
        const idi insert_index, // The insertion location, independent with queue_start
        const idi queue_start,
        idi &queue_size, // The number of elements in queue, independent with queue_start
        const idi queue_length) // The maximum capacity of queue, independent with queue_start.
{
    const idi dest_index = queue_start + insert_index;
    if (queue_size == queue_length) {
        // Queue is full: the tail element is overwritten by the shift below.
        --queue_size;
    }
    memmove(reinterpret_cast<char *>(queue.data() + dest_index + 1),
            reinterpret_cast<char *>(queue.data() + dest_index),
            (queue_size - insert_index) * sizeof(Candidate));
    queue[dest_index] = cand;
    ++queue_size;
}

/**
 * Overwrite-insert at insert_index: shifts [insert_index, queue_size-1)
 * right by one (the last element is lost) and writes cand. queue_size is
 * NOT changed — the caller manages it.
 */
inline void Searching::insert_one_element_at(
//        const T &cand,
//        T *queue_base,
        const Candidate &cand,
        std::vector<Candidate> &queue,
        const idi insert_index,
        const idi queue_start,
        const idi queue_size)
{
    const idi dest_index = queue_start + insert_index;
    memmove(reinterpret_cast<char *>(queue.data() + dest_index + 1),
            reinterpret_cast<char *>(queue.data() + dest_index),
            (queue_size - insert_index - 1) * sizeof(Candidate));
    queue[dest_index] = cand;
//    memmove(reinterpret_cast<char *>(queue_base + dest_index + 1),
//            reinterpret_cast<char *>(queue_base + dest_index),
//            (queue_size - insert_index - 1) * sizeof(T));
//    for (idi q_i = queue_size - 1; q_i > insert_index; --q_i) {
//        queue_base.at(q_i + queue_start) = queue_base.at(q_i - 1 + queue_start);
//    }
//    queue_base[dest_index] = cand;
}

/* Function:
 * queue1_size is fixed.
 * Merge queue2 into queue1 keeping queue1's size constant (worst elements
 * fall off the end); returns the lowest index in queue1 that changed.
 */
inline idi Searching::merge_two_queues_into_1st_queue_seq_fixed(
        std::vector<Candidate> &queue1,
        const idi queue1_start,
        const idi queue1_size,
        std::vector<Candidate> &queue2,
        const idi queue2_start,
        const idi queue2_size)
//        const idi limit_size)
{
    assert(queue1_size && queue2_size);
    // Record the lowest insert location.
auto it_loc = std::lower_bound(
            queue1.begin() + queue1_start,
            queue1.begin() + queue1_start + queue1_size,
            queue2[queue2_start]);
    idi insert_index = it_loc - (queue1.begin() + queue1_start);
    if (insert_index == queue1_size) {
        // Everything in queue2 is worse than queue1's worst: nothing to do.
        return insert_index;
    } else if (insert_index == queue1_size - 1) {
        // Only the last slot can change.
        queue1[queue1_start + insert_index] = queue2[queue2_start];
        return insert_index;
    }

    // Insert the 1st of queue2
    if (queue2[queue2_start].id_ != it_loc->id_) {
        // Not Duplicate
        insert_one_element_at(
                queue2[queue2_start],
                queue1,
                insert_index,
                queue1_start,
                queue1_size);
    }
    if (queue2_size == 1) {
        return insert_index;
    }

    // Insert: standard two-pointer merge; each winning queue2 element is
    // spliced in via insert_one_element_at (size stays fixed).
    idi q_i_1 = insert_index + 1 + queue1_start;
    idi q_i_2 = queue2_start + 1;
    const idi q_i_1_bound = queue1_start + queue1_size;
    const idi q_i_2_bound = queue2_start + queue2_size;
//    const idi insert_i_bound = queue1_start + limit_size;
    for (idi insert_i = insert_index + 1; insert_i < queue1_size; ++insert_i) {
        if (q_i_1 >= q_i_1_bound || q_i_2 >= q_i_2_bound) {
            // queue1 or queue2 finished traverse. Rest o
            break;
        } else if (queue1[q_i_1] < queue2[q_i_2]) {
            ++q_i_1;
        } else if (queue2[q_i_2] < queue1[q_i_1]) {
            // Insert queue2[q_i_2] into queue1
            insert_one_element_at(
                    queue2[q_i_2++],
                    queue1,
                    insert_i,
                    queue1_start,
                    queue1_size);
            ++q_i_1;
        } else {
            // Duplicate
            ++q_i_2;
            ++q_i_1;
        }
    }
    return insert_index;
}

/* Function:
 * queue1_size should be updated.
 * queue1_length should be provided.
 * Merge queue2 into queue1, growing queue1 up to queue1_length; returns the
 * lowest index in queue1 that changed.
 */
inline idi Searching::merge_two_queues_into_1st_queue_seq_incr(
        std::vector<Candidate> &queue1,
        const idi queue1_start,
        idi &queue1_size, // The number of element in queue1, independent with queue1_start.
        const idi queue1_length, // The maximum capacity of queue1, independent with queue1_start.
        std::vector<Candidate> &queue2,
        const idi queue2_start,
        const idi queue2_size)
//        const idi limit_size)
{
    assert(queue1_size && queue2_size);
    // Record the lowest insert location.
    auto it_loc = std::lower_bound(
            queue1.begin() + queue1_start,
            queue1.begin() + queue1_start + queue1_size,
            queue2[queue2_start]);
    idi insert_index = it_loc - (queue1.begin() + queue1_start);
    if (insert_index == queue1_size) {
        // All of queue2 goes after queue1's current tail: bulk-append as much
        // as capacity allows.
        idi copy_count = (queue1_size + queue2_size > queue1_length) ?
                         queue1_length - queue1_size :
                         queue2_size;
        memmove(queue1.data() + queue1_start + queue1_size,
                queue2.data() + queue2_start,
                copy_count * sizeof(Candidate));
        queue1_size += copy_count;
        return insert_index;
    }
    if (queue2[queue2_start].id_ != it_loc->id_) {
        // Not Duplicate
        add_into_queue_at(
                queue2[queue2_start],
                queue1,
                insert_index,
                queue1_start,
                queue1_size,
                queue1_length);
    }
    if (queue2_size == 1) {
        return insert_index;
    }

    // Insert
    idi q_i_1 = insert_index + 1 + queue1_start;
    idi q_i_2 = queue2_start + 1;
    idi q_i_1_bound = queue1_start + queue1_size; // When queue1_size is updated, so should be q_i_1_bound.
    const idi q_i_2_bound = queue2_start + queue2_size;
//    idi insert_i;
    for (idi insert_i = insert_index + 1; insert_i < queue1_length; ++insert_i) {
        if (q_i_1 >= q_i_1_bound) {
            // queue1 exhausted: copy the remaining queue2 tail while room lasts.
            queue1_size += std::min(queue1_length - insert_i, q_i_2_bound - q_i_2);
            for ( ; insert_i < queue1_size; ++insert_i) {
                queue1[queue1_start + insert_i] = queue2[q_i_2++];
            }
            break;
        } else if (q_i_2 >= q_i_2_bound) {
            break;
        } else if (queue1[q_i_1] < queue2[q_i_2]) {
            ++q_i_1;
        } else if (queue2[q_i_2] < queue1[q_i_1]) {
            add_into_queue_at(
                    queue2[q_i_2++],
                    queue1,
                    insert_i,
                    queue1_start,
                    queue1_size,
                    queue1_length);
            ++q_i_1;
            q_i_1_bound = queue1_start + queue1_size;
        } else {
            // Duplicate
            ++q_i_2;
            ++q_i_1;
        }
    }
    return insert_index;
}

/* Function:
 * Use large local_queues_array as a concatenation of all queues
 * (pairwise tree merge over per-thread queues; continues past this chunk).
 */
inline idi Searching::merge_all_queues_para_array(
        std::vector<Candidate> &set_L,
        std::vector<idi> &local_queues_ends,
        const idi local_queue_length,
        const idi L)
{
    const int num_queues = num_threads_;
    idi nk = L;
    int size = 1 << (static_cast<idi>(log2(num_queues)));
    idi log2size =
static_cast<idi>(log2(size)); for (idi d = 0; d < log2size; ++d) { uint32_t by = 1 << (d + 1); #pragma omp parallel for for (int i = 0; i < size; i += by) { idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1 idi a_start = ai * local_queue_length; idi bi = i + (1 << d) - 1; // i + 2^d - 1 idi b_start = bi * local_queue_length; if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { std::copy(set_L.begin() + b_start, set_L.begin() + b_start + local_queues_ends[bi], set_L.begin() + a_start); // Copy bi to ai local_queues_ends[ai] = local_queues_ends[bi]; local_queues_ends[bi] = 0; continue; } if (ai != static_cast<idi>(num_queues - 1)) { merge_two_queues_into_1st_queue_seq_incr( set_L, a_start, local_queues_ends[ai], local_queue_length, set_L, b_start, local_queues_ends[bi]); } else { idi r = merge_two_queues_into_1st_queue_seq_fixed( set_L, a_start, L, set_L, b_start, local_queues_ends[bi]); if (r < nk) { nk = r; } } } } // // Remain, prefix-sum-like merge // if (size != num_queues) { // for (int i = size; i < num_queues; ++i) { // idi ai = i; // idi a_start = ai * local_queue_length; // idi bi = i - 1; // idi b_start = bi * local_queue_length; // if (0 == local_queues_ends[bi]) { // continue; // } // if (local_queues_ends[ai] == 0) { // std::copy(set_L.begin() + b_start, // set_L.begin() + b_start + local_queues_ends[bi], // set_L.begin() + a_start); // Copy bi to ai // local_queues_ends[ai] = local_queues_ends[bi]; // local_queues_ends[bi] = 0; // continue; // } // if (ai != static_cast<idi>(num_queues - 1)) { // merge_two_queues_into_1st_queue_seq_incr( // set_L, // a_start, // local_queues_ends[ai], // local_queue_length, // set_L, // b_start, // local_queues_ends[bi]); // } else { // idi r = merge_two_queues_into_1st_queue_seq_fixed( // set_L, // a_start, // L, // set_L, // b_start, // local_queues_ends[bi]); // if (r < nk) { // nk = r; // } // } // } // } // Reset local_queues_ends // Not do this for Collector Idea or Selecting Idea 
std::fill(local_queues_ends.begin(), local_queues_ends.end() - 1, 0); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); return nk; // return r; } /* * Function: merge 4 queues into the last queue */ inline idi Searching::merge_queues_of_four( std::vector<Candidate> &set_L, const std::vector<idi> &local_queues_starts, std::vector<idi> &local_queues_sizes, const idi group_id, const idi local_queue_capacity, const idi master_queue_capacity) { // const int num_queues = 4; const idi group_start = group_id * 4; idi nk = master_queue_capacity; #pragma omp parallel for num_threads(2) for (int i = 0; i < 2; ++i) { const idi bi = 2 * i + group_start; const idi ai = bi + 1; if (!local_queues_sizes[bi]) { continue; } if (!local_queues_sizes[ai]) { std::copy( set_L.begin() + local_queues_starts[bi], set_L.begin() + local_queues_starts[bi] + local_queues_sizes[bi], set_L.begin() + local_queues_starts[ai]); local_queues_sizes[ai] = local_queues_sizes[bi]; local_queues_sizes[bi] = 0; continue; } if (ai != 3 + group_start) { merge_two_queues_into_1st_queue_seq_incr( set_L, local_queues_starts[ai], local_queues_sizes[ai], local_queue_capacity, set_L, local_queues_starts[bi], local_queues_sizes[bi]); } else { idi r = merge_two_queues_into_1st_queue_seq_incr( set_L, local_queues_starts[ai], local_queues_sizes[ai], master_queue_capacity, set_L, local_queues_starts[bi], local_queues_sizes[bi]); if (r < nk) { nk = r; } } local_queues_sizes[bi] = 0; } { const idi bi = 1 + group_start; const idi ai = 3 + group_start; if (!local_queues_sizes[bi]) { return nk; } if (!local_queues_sizes[ai]) { std::copy( set_L.begin() + local_queues_starts[bi], set_L.begin() + local_queues_starts[bi] + local_queues_sizes[bi], set_L.begin() + local_queues_starts[ai]); local_queues_sizes[ai] = local_queues_sizes[bi]; local_queues_sizes[bi] = 0; return 0; } idi r = merge_two_queues_into_1st_queue_seq_incr( set_L, local_queues_starts[ai], local_queues_sizes[ai], master_queue_capacity, set_L, 
local_queues_starts[bi], local_queues_sizes[bi]); if (r < nk) { nk = r; } local_queues_sizes[bi] = 0; } return nk; } /* * Function: used by hierarchical merging idea. * Merge all queues into the last queue. * Difference with merge_all_queues_para_array: here the last queue might not have L elements in the beginning, * so use merge_two_queues_into_1st_queue_seq_incr(), not merge_two_queues_into_1st_queue_seq_fixed(). */ inline idi Searching::merge_all_queues_to_master( std::vector<Candidate> &set_L, const std::vector<idi> &local_queues_starts, std::vector<idi> &local_queues_sizes, const idi local_queue_capacity, const idi local_master_queue_capacity, const idi master_queue_capacity, const idi group_size) { const idi num_queues = num_threads_; idi nk = master_queue_capacity; int size = num_queues; // int size = 1 << (static_cast<idi>(log2(num_queues))); idi log2size = static_cast<idi>(log2(size)); idi num_t = num_queues >> 1; for (idi d = 0; d < log2size; ++d) { uint32_t by = 1 << (d + 1); //#pragma omp parallel for num_threads(2) //#pragma omp parallel for for (int i = 0; i < size; i += by) { idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1 // idi a_start = ai * local_queue_capacity; idi a_start = local_queues_starts[ai]; idi bi = i + (1 << d) - 1; // i + 2^d - 1 // idi b_start = bi * local_queue_capacity; idi b_start = local_queues_starts[bi]; if (0 == local_queues_sizes[bi]) { continue; } {//test printf("local_queues_sizes[%u]: %u\n", bi, local_queues_sizes[bi]); } if (local_queues_sizes[ai] == 0) { std::copy(set_L.begin() + b_start, set_L.begin() + b_start + local_queues_sizes[bi], set_L.begin() + a_start); // Copy bi to ai local_queues_sizes[ai] = local_queues_sizes[bi]; local_queues_sizes[bi] = 0; continue; } if ((group_size - 1) != ai % 4) { merge_two_queues_into_1st_queue_seq_incr( set_L, a_start, local_queues_sizes[ai], local_queue_capacity, set_L, b_start, local_queues_sizes[bi]); } else if (num_queues - 1 != ai) { 
merge_two_queues_into_1st_queue_seq_incr( set_L, a_start, local_queues_sizes[ai], local_master_queue_capacity, set_L, b_start, local_queues_sizes[bi]); } else { idi r = merge_two_queues_into_1st_queue_seq_incr( set_L, a_start, local_queues_sizes[ai], master_queue_capacity, set_L, b_start, local_queues_sizes[bi]); if (ai == num_queues - 1 && r < nk) { nk = r; } } local_queues_sizes[bi] = 0; } num_t >>= 1; } // Reset local_queues_sizes // Not do this for Collector Idea or Selecting Idea // std::fill(local_queues_sizes.begin(), local_queues_sizes.end() - 1, 0); // std::fill(local_queues_sizes.begin(), local_queues_sizes.end(), 0); return nk; } /* * Function: distribute master queue's top-M unchecked elements to top_m_candidates. * Used by hierarchical merging idea. */ inline idi Searching::master_top_m_to_groups( std::vector<Candidate> &set_L, const std::vector<idi> &local_queues_starts, std::vector<idi> &local_queues_sizes, std::vector<idi> &top_m_candidates, const std::vector<idi> &top_m_candidates_starts, std::vector<idi> &top_m_candidates_sizes, const idi k_uc, idi &last_k, const idi M, const idi num_groups) // const idi group_size) { const idi last_queue_start = local_queues_starts[num_threads_ - 1]; idi c_i_start = k_uc + last_queue_start; idi c_i_bound = last_queue_start + local_queues_sizes[num_threads_ - 1]; idi top_m_count = 0; // std::vector<idi> tmp_sizes(num_groups, 0); // idi tmp_sizes[2] = {0, 0}; for (idi c_i = c_i_start; c_i < c_i_bound && top_m_count < M; ++c_i) { if (set_L[c_i].is_checked_) { continue; } last_k = c_i - last_queue_start; set_L[c_i].is_checked_ = true; idi g_i = top_m_count & (num_groups - 1); // idi g_i = top_m_count % num_groups; ++top_m_count; // top_m_candidates[top_m_candidates_starts[g_i] + tmp_sizes[g_i]++] = set_L[c_i].id_; // top_m_candidates[top_m_candidates_sizes[g_i]] = set_L[c_i].id_; // top_m_candidates[top_m_candidates_sizes[g_i]++] = set_L[c_i].id_; // top_m_candidates[top_m_candidates_starts[g_i] + 
top_m_candidates_sizes[g_i]] = set_L[c_i].id_; // ++top_m_candidates_sizes[g_i]; top_m_candidates[top_m_candidates_starts[g_i] + top_m_candidates_sizes[g_i]++] = set_L[c_i].id_; } // top_m_candidates_sizes[0] = tmp_sizes[0]; // top_m_candidates_sizes[1] = tmp_sizes[1]; // std::fill(top_m_candidates_sizes.begin(), top_m_candidates_sizes.end(), M / num_groups); // top_m_candidates_sizes[0] = M / num_groups; // top_m_candidates_sizes[1] = M / num_groups; // std::copy(tmp_sizes.begin(), tmp_sizes.end(), top_m_candidates_sizes.begin()); // top_m_candidates_sizes[0] = tmp_sizes[0]; top_m_candidates_sizes[1] = tmp_sizes[1]; return top_m_count; // idi m_i = 0; // const idi master_start = local_queues_starts[num_threads_ - 1]; // const idi e_i_bound = local_queues_sizes[num_threads_ - 1]; // for (idi e_i = 0; e_i < e_i_bound; ++e_i) { // idi group_id = e_i % num_groups; // if (num_groups - 1 == group_id) { // set_L[master_start + m_i++] = set_L[master_start + e_i]; // } else { // idi q_id = group_id * group_size + group_size - 1; // set_L[local_queues_starts[q_id] + local_queues_sizes[q_id]++] = set_L[master_start + e_i]; // } // } // local_queues_sizes[num_threads_ - 1] = m_i; } /* * 6/22/2020-21:30 * Do searching on the local_set_L * local_set_L is already sorted * is_visited is already set up. */ inline void Searching::subsearch_with_top_m( const idi value_M_max, const idi query_id, const idi local_L, std::vector<Candidate> &set_L, const idi set_L_start, idi &set_L_size, std::vector<idi> &local_top_m_candidates, boost::dynamic_bitset<> &is_visited, uint64_t &local_count_distance_computation) { const dataf *query_data = queries_load_ + query_id * dimension_; // idi local_top_m_candidates_end = 0; idi k = 0; // Index of first unchecked candidate. 
idi iter = 0; idi M = 1; // value of M while (k < local_L) { ++iter; subsearch_top_m_for_one_iteration( iter, k, M, query_id, query_data, local_L, set_L, set_L_start, set_L_size, local_top_m_candidates, is_visited, local_count_distance_computation); {// Scale M if (M < value_M_max) { M <<= 1; } // else { // M = value_M_max; // } } } // {//test // printf("set_L_start: %u " // "local_count_distance_computation: %lu\n", // set_L_start, // local_count_distance_computation); // } } /* * 7/6/2020-23:17 * Subsearch only 1 iteration using top-m */ inline void Searching::subsearch_top_m_for_one_iteration( const idi iter, idi &k_uc, const idi value_M, const idi query_id, const dataf *query_data, const idi L, std::vector<Candidate> &set_L, const idi set_L_start, idi &set_L_size, std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited, uint64_t &count_distance_computation) { // Select M candidates idi top_m_candidates_end = 0; idi last_k = L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. for (idi c_i = k_uc; c_i < set_L_size && top_m_candidates_end < value_M; ++c_i) { idi index_set_L = c_i + set_L_start; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; } idi nk = L; // Push M candidates' neighbors into the queue. 
for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { idi cand_id = top_m_candidates[c_i]; expand_one_candidate( cand_id, query_data, set_L[set_L_size - 1 + set_L_start].distance_, set_L, set_L_start, set_L_size, L, is_visited, count_distance_computation); // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[set_L_size - 1 + set_L_start].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // idi r = add_into_queue( // set_L, // set_L_start, // set_L_size, // L, // cand); // if (r < nk) { // nk = r; // } // } } // top_m_candidates_end = 0; // Clear top_m_candidates if (nk <= last_k) { k_uc = nk; } else { k_uc = last_k + 1; } } /* * 7/31/2020-12:48 * Use for profile. Sequential Double-M. 
 */
/*
 * Sequential Top-M search with doubling M (profiling variant).
 * Seeds set_L with the init_ids candidates (distances computed against
 * query_id's vector), runs subsearch_with_top_m over the whole queue, then
 * writes the ids of the best K candidates into set_K.
 */
inline void Searching::seq_search_with_top_m_double_m(
        const idi M_max,
        const idi query_id,
        const idi K,
        const idi global_L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
//        std::vector<idi> &top_m_candidates,
//        boost::dynamic_bitset<> &is_visited)
{
//    time_initialization_ -= WallTimer::get_time_mark();
    std::vector<idi> top_m_candidates(M_max);
    boost::dynamic_bitset<> is_visited(num_v_);
    uint64_t tmp_count_computation = 0;
    idi set_L_size;

    {// Initialization
        // is_visited flag array
//#pragma omp parallel for
// Cannot use OMP for bit array is_visited!
        for (idi c_i = 0; c_i < global_L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
        }
        const dataf *query_data = queries_load_ + query_id * dimension_;
//#pragma omp parallel for
        // Warm the cache with each seed vertex's record before computing.
        for (idi v_i = 0; v_i < global_L; ++v_i) {
            idi v_id = init_ids[v_i];
            _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
        }
        // Get the distances of all candidates, store in the set set_L.
//#pragma omp parallel for reduction(+ : tmp_count_computation)
        for (idi id_i = 0; id_i < global_L; ++id_i) {
            idi v_id = init_ids[id_i];
            auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
            dataf norm = *v_data++; // Vertex record starts with its norm.
            ++tmp_count_computation;
            distf dist = compute_distance_with_norm(v_data, query_data, norm);
            set_L[id_i] = Candidate(v_id, dist, false); // False means not checked.
        }
        set_L_size = global_L;
        count_distance_computation_ += tmp_count_computation;
        tmp_count_computation = 0;
        std::sort(set_L.begin(), set_L.begin() + global_L);
    }
//    time_initialization_ += WallTimer::get_time_mark();

    // Searching
    subsearch_with_top_m(
            M_max,
            query_id,
            global_L,
            set_L,
            0,
            set_L_size,
            top_m_candidates,
            is_visited,
            tmp_count_computation);
    count_distance_computation_ += tmp_count_computation;
    tmp_count_computation = 0;

//    time_merge_ -= WallTimer::get_time_mark();
//    time_ending_ -= WallTimer::get_time_mark();
//    time_merge_ += WallTimer::get_time_mark();
    {// Return the top K ids.
        for (idi k_i = 0; k_i < K; ++k_i) {
            set_K[k_i] = set_L[k_i].id_;
//            set_K[k_i] = set_L[k_i].id_;
        }
    }
//    {// Reset
////        std::fill(is_visited.begin(), is_visited.end(), 0);
//        is_visited.reset();
//    }
//    time_ending_ += WallTimer::get_time_mark();
//    {//test
//        if (3 == query_id) {
//            exit(1);
//        }
//    }
}

/*
 * 8/6/2020-21:08
 * The same procedure with Middle-M, but do hierarchical merging to reduce merging frequency.
 * Right now there are only 3 levels (1 middle level). And 4 workers form a group.
*/ inline void Searching::para_search_with_top_m_hierarchy_merge_v1( const idi value_M_middle, const idi value_M_max, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_capacity, // Maximum size of local queue const idi local_master_queue_capacity, // Maximum size of local master queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; const std::vector<idi> &local_queues_starts, std::vector<idi> &local_queues_sizes, // Sizes of local queue // std::vector< std::vector<idi> > &top_m_candidates_list, // every group has one top-M queue std::vector<idi> &top_m_candidates, const std::vector<idi> &top_m_candidates_starts, std::vector<idi> &top_m_candidates_sizes, // std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited, const idi group_size, // Should be 4 const idi full_merge_freq) { // time_initialization_ -= WallTimer::get_time_mark(); // const idi base_set_L = (num_threads_ - 1) * local_queue_length; const idi master_queue_start = local_queues_starts[num_threads_ - 1]; const idi num_groups = (num_threads_ - 1) / group_size + 1; // 4 workers per group. const dataf *query_data = queries_load_ + query_id * dimension_; // Initialization Phase { //#pragma omp parallel for for (idi c_i = 0; c_i < L; ++c_i) { is_visited[init_ids[c_i]] = 1; } //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // Get the distances of all candidates, store in the set set_L. 
uint64_t tmp_count_computation = 0; #pragma omp parallel for reduction(+ : tmp_count_computation) for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; ++tmp_count_computation; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i + master_queue_start] = Candidate(v_id, dist, false); // False means not checked. } count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; std::sort( set_L.begin() + master_queue_start, set_L.begin() + master_queue_start + L); local_queues_sizes[num_threads_ - 1] = L; } // Initialization Phase // time_initialization_ += WallTimer::get_time_mark(); // idi top_m_candidates_end = 0; idi iter = 0; // for debug idi M = 1; idi k = 0; // Index of first unchecked candidate. // Sequential Phase { uint64_t tmp_count_computation = 0; while (k < L && M < value_M_middle) { ++iter; subsearch_top_m_for_one_iteration( iter, k, M, query_id, query_data, L, set_L, master_queue_start, local_queues_sizes[num_threads_ - 1], top_m_candidates, is_visited, tmp_count_computation); count_distance_computation_ += tmp_count_computation; threads_computations_[0] += tmp_count_computation; tmp_count_computation = 0; if (threads_computations_[0] >= thread_compuation_quota_) { break; } {// Double M if (M < value_M_max) { M <<= 1U; } } } } // Sequential Phase // if (M < static_cast<idi>(num_threads_)) { // M = num_threads_; // } // Divide computation cost from thread 0 to others { // printf("threads_computations_[0]: %lu\n", // threads_computations_[0]); std::fill( threads_computations_.begin(), threads_computations_.end(), threads_computations_[0] / num_threads_); } // Parallel Phase idi para_iter = 0; // if (true) { if (num_threads_ <= 4) { idi top_m_candidates_size = 0; idi last_k; idi nk; uint64_t tmp_count_computation = 0; while (true) { // while (k < L) { ++iter; // {//test // printf("query_id: %u " // 
"iter: %u \n", // query_id, // iter); // } last_k = L; // Pick top-M for (idi c_i = k; c_i < L && top_m_candidates_size < M; ++c_i) { idi index_set_L = c_i + master_queue_start; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_size++] = set_L[index_set_L].id_; } if (!top_m_candidates_size) { break; } // time_pick_top_m_ += WallTimer::get_time_mark(); nk = L; // Push M candidates' neighbors into the queue. #pragma omp parallel for reduction(+ : tmp_count_computation) for (idi c_i = 0; c_i < top_m_candidates_size; ++c_i) { int tid = omp_get_thread_num(); // Computation quota if (threads_computations_[tid] >= thread_compuation_quota_) { continue; } uint64_t tmp_last_count_computation = tmp_count_computation; idi local_queue_start = local_queues_starts[tid]; idi &local_queue_size = local_queues_sizes[tid]; idi cand_id = top_m_candidates[c_i]; if (num_threads_ - 1 != tid) { expand_one_candidate( cand_id, query_data, set_L[master_queue_start + L - 1].distance_, set_L, local_queue_start, local_queue_size, local_queue_capacity, is_visited, tmp_count_computation); } else { idi r = expand_one_candidate( cand_id, query_data, set_L[master_queue_start + L - 1].distance_, set_L, local_queue_start, local_queue_size, L, is_visited, tmp_count_computation); if (r < nk) { nk = r; } } threads_computations_[tid] += tmp_count_computation - tmp_last_count_computation; } top_m_candidates_size = 0; // Clear top_m_candidates count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; // // Merge. Merge all queues in parallel. 
{ // time_merge_ -= WallTimer::get_time_mark(); if (num_threads_ > 1) { time_full_merge_ -= WallTimer::get_time_mark(); idi r = merge_all_queues_para_array( set_L, local_queues_sizes, local_queue_capacity, L); if (r < nk) { nk = r; } time_full_merge_ += WallTimer::get_time_mark(); } } if (nk <= last_k) { k = nk; } else { k = last_k + 1; } {// Scale M if (M < value_M_max) { M <<= 1U; } } } } else { // 8 threads idi tmp_iter_bound = 9; bool is_finished = false; bool is_full_merged = true; idi M_group; std::vector<idi> ks(num_groups, 0); ks[num_groups - 1] = k; std::vector<idi> nks(num_groups); std::vector<idi> last_ks(num_groups); uint64_t tmp_count_distance_computation = 0; // bool is_finished = false; while (!is_finished) { ++para_iter; ++iter; M_group = M / num_groups; is_finished = true; auto s = std::chrono::high_resolution_clock::now(); if (1 == para_iter || (para_iter - 1) % full_merge_freq) { // Initialize every group's top-M candidates from the global Master queue // time_move_top_m_ -= WallTimer::get_time_mark(); master_top_m_to_groups( set_L, local_queues_starts, local_queues_sizes, top_m_candidates, top_m_candidates_starts, top_m_candidates_sizes, ks[num_groups - 1], last_ks[num_groups - 1], M, num_groups); // time_move_top_m_ += WallTimer::get_time_mark(); // std::fill(top_m_candidates_sizes.begin(), top_m_candidates_sizes.end(), --tmp_iter_bound); } auto e = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> diff = e - s; time_move_top_m_ += diff.count(); // {//test // printf("query_id: %u " // "para_iter: %u " // "iter: %u " // "M_group: %u " // "top_m_c_sizes: %u %u\n", // query_id, // para_iter, // iter, // M_group, // top_m_candidates_sizes[0], top_m_candidates_sizes[1]); // } //#pragma omp parallel for num_threads(num_groups) \ // reduction(+ : tmp_count_distance_computation) //#pragma omp parallel for reduction(+ : tmp_count_distance_computation) for (idi g_i = 0; g_i < num_groups; ++g_i) { const idi local_master_queue_id = 
g_i * group_size + group_size - 1; const idi local_master_queue_start = local_queues_starts[local_master_queue_id]; idi &local_master_queue_size = local_queues_sizes[local_master_queue_id]; idi &k_uc = ks[g_i]; const idi top_m_candidates_start = top_m_candidates_starts[g_i]; idi &top_m_candidates_size = top_m_candidates_sizes[g_i]; idi &last_k = last_ks[g_i]; // Pick top-M // if (1 != para_iter && 0 == (para_iter - 1) % full_merge_freq) { //// if ((para_iter - 1) % full_merge_freq) { // last_k = L; // for (idi c_i = k_uc; c_i < local_master_queue_size && top_m_candidates_size < M_group; ++c_i) { // idi index_set_L = c_i + local_master_queue_start; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_start + top_m_candidates_size++] = set_L[index_set_L].id_; // } // } if (!top_m_candidates_size) { continue; } is_finished = false; idi &nk = nks[g_i]; nk = L; idi c_i_start = top_m_candidates_starts[g_i]; idi c_i_bound = c_i_start + top_m_candidates_size; uint64_t tmp_count_distance_computation_ig = 0; // Expand top-M //#pragma omp parallel for num_threads(group_size) \ // reduction(+ : tmp_count_distance_computation_ig) //#pragma omp parallel for reduction(+ : tmp_count_distance_computation_ig) for (idi c_i = c_i_start; c_i < c_i_bound; ++c_i) { idi tid_ig = omp_get_thread_num(); // idi tid_ig = (c_i - c_i_start) % group_size; idi q_id = g_i * group_size + tid_ig; if (threads_computations_[q_id] >= thread_compuation_quota_) { continue; } // uint64_t tmp_last_count_computation_ig = tmp_count_distance_computation_ig; // idi cand_id = top_m_candidates[c_i]; //// _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; //// for (idi n_i = 0; n_i < out_degree; ++n_i) { //// 
_mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); //// } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++tmp_count_distance_computation_ig; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (local_master_queue_size == local_master_queue_capacity // && dist > set_L[local_master_queue_size - 1 + local_master_queue_start].distance_) { // continue; // } // Candidate cand(nb_id, dist, false); // // Add to the local queue. // // if (0 != tid_ig) { // // Non-Master threads using local queues // add_into_queue( // set_L, // local_queues_starts[q_id - 1], // local_queues_sizes[q_id - 1], // local_queue_capacity, // cand); // } else if (num_groups - 1 != g_i) { // // Thread 0 but not the last group maintains the local master queue // idi r = add_into_queue( // set_L, // local_master_queue_start, // local_master_queue_size, // local_master_queue_capacity, // cand); // if (r < nk) { // nk = r; // } // } else { // // Thread 0 and the last group maintains the master queue // idi r = add_into_queue( // set_L, // local_master_queue_start, // local_master_queue_size, // L, // cand); // if (r < nk) { // nk = r; // } // } // } // threads_computations_[q_id] += tmp_count_distance_computation_ig - tmp_last_count_computation_ig; } // Expand in a group tmp_count_distance_computation += tmp_count_distance_computation_ig; top_m_candidates_size = 0; // Merge in a group // if (0 == (para_iter % full_merge_freq)) { // idi r; // if (num_groups - 1 != g_i) { // // Normal group // r = merge_queues_of_four( // set_L, // local_queues_starts, // local_queues_sizes, // g_i, // local_queue_capacity, // local_master_queue_capacity); // } else { // // The group contains the master queue // r = 
merge_queues_of_four( // set_L, // local_queues_starts, // local_queues_sizes, // g_i, // local_queue_capacity, // L); // } // if (r < nk) { // nk = r; // } // if (nk <= last_k) { // k_uc = nk; // } else { // k_uc = last_k + 1; // } // } } // Middle Level Parallelism count_distance_computation_ += tmp_count_distance_computation; tmp_count_distance_computation = 0; // Do full merge and distribute if (!is_finished && para_iter % full_merge_freq) { // Full merge time_full_merge_ -= WallTimer::get_time_mark(); ++count_full_merge_; idi r = merge_all_queues_to_master( set_L, local_queues_starts, local_queues_sizes, local_queue_capacity, local_master_queue_capacity, L, group_size); time_full_merge_ += WallTimer::get_time_mark(); // is_full_merged = true; // idi &nk = nks[num_groups - 1]; // idi &k_uc = ks[num_groups - 1]; // idi &last_k = last_ks[num_groups - 1]; // if (r < nk) { // nk = r; // } // if (nk <= last_k) { // k_uc = nk; // } else { // k_uc = last_k + 1; // } } else { is_full_merged = false; } {// Scale M if (M < value_M_max) { M <<= 1U; } } } // Iteration // if (!is_full_merged) { // merge_all_queues_to_master( // set_L, // local_queues_sizes, // local_queues_sizes, // local_queue_capacity, // local_master_queue_capacity, // L, // group_size); // } } #pragma omp parallel for for (idi k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i + master_queue_start].id_; // set_K[k_i] = set_L[k_i].id_; } {// Reset // std::fill(is_visited.begin(), is_visited.end(), 0); is_visited.reset(); // is_visited.clear_all(); // std::fill(local_queues_sizes.begin(), local_queues_sizes.end(), 0); std::fill(threads_computations_.begin(), threads_computations_.end(), 0); } // {//test // if (3 == query_id) { // exit(1); // } // } } /* * Function: expand a candidate, visiting its neighbors. * Return the lowest adding location. 
*/ inline idi Searching::expand_one_candidate( idi cand_id, const dataf *query_data, const distf &dist_bound, std::vector<Candidate> &set_L, const idi local_queue_start, idi &local_queue_size, const idi &local_queue_capacity, boost::dynamic_bitset<> &is_visited, // const idi nk_init, uint64_t &local_count_computation) { // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // tmp_time_pick_top_m += WallTimer::get_time_mark(); idi nk = local_queue_capacity; for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++local_count_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > dist_bound) { // if (dist > set_L[L - 1 + master_queue_start].distance_) { continue; } Candidate cand(nb_id, dist, false); // Add to the local queue. idi r = add_into_queue( set_L, local_queue_start, local_queue_size, local_queue_capacity, cand); if (r < nk) { nk = r; } } return nk; } /* * 8/6/2020-11:58 * Based on Middle-4, but reduce full merge frequency. * Actually, this is local Searching, not Less Synchronization. 
 */

/**
 * Answer one query with parallel top-M best-first search and write the ids of
 * the K best candidates found into set_K.
 *
 * Three phases:
 *  1. Initialization: seed the master queue (owned by thread num_threads_-1)
 *     with init_ids; distances are computed in parallel, then the queue is
 *     sorted.
 *  2. Sequential: classic top-M iterations on the master queue only, while M
 *     doubles from 1 up to value_M_middle.
 *  3. Parallel: the unchecked top-M candidates of the master queue are
 *     expanded by all threads into per-thread local queues (`nowait` loop);
 *     every full_merge_freq-th iteration each thread additionally runs a
 *     bounded local search on its own queue before the implicit barrier; all
 *     local queues are then merged back into the master queue and M doubles
 *     (capped at value_M_max). The loop stops when no unchecked candidate
 *     remains among the top L.
 *
 * NOTE(review): despite the "reduce full merge frequency" intent stated in
 * the header comment, the merge itself runs on every parallel iteration;
 * only the extra per-thread local search is gated by full_merge_freq —
 * confirm this is intended.
 *
 * @param value_M_middle       M bound that ends the sequential phase.
 * @param value_M_max          upper cap for M.
 * @param query_id             index of the query vector in queries_load_.
 * @param K                    number of result ids written to set_K.
 * @param L                    master queue length (search width).
 * @param set_L                backing array holding all per-thread queues;
 *                             the master queue occupies
 *                             [master_queue_start, master_queue_start + L).
 * @param init_ids             seed vertex ids (at least L of them are used).
 * @param set_K                output: ids of the best K candidates.
 * @param local_queue_capacity maximum size of each non-master local queue.
 * @param local_queues_starts  start offset of each thread's queue in set_L.
 * @param local_queues_sizes   in/out: current size of each thread's queue.
 * @param top_m_candidates     scratch buffer for the selected top-M ids.
 * @param is_visited           visited bitmap; reset before returning.
 * @param full_merge_freq      period (in parallel iterations) of the extra
 *                             per-thread local-search step.
 * @param local_iter_bound     max expansions per local-search step.
 */
inline void
Searching::para_search_with_top_m_less_sync_v0(
        const idi value_M_middle,
        const idi value_M_max,
        const idi query_id,
        const idi K,
        const idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K,
        const idi local_queue_capacity, // Maximum size of local queue
        const std::vector<idi> &local_queues_starts,
        std::vector<idi> &local_queues_sizes, // Sizes of local queue
        std::vector<idi> &top_m_candidates,
        boost::dynamic_bitset<> &is_visited,
        const idi full_merge_freq,
        const idi local_iter_bound)
{
    // The last thread owns the master queue.
    const idi master_queue_start = local_queues_starts[num_threads_ - 1];
    const dataf *query_data = queries_load_ + query_id * dimension_;

    // Initialization Phase
    {
//#pragma omp parallel for
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = 1;
        }
//#pragma omp parallel for
//    for (idi v_i = 0; v_i < L; ++v_i) {
//        idi v_id = init_ids[v_i];
//        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
//    }
        // Get the distances of all candidates, store in the set set_L.
        uint64_t tmp_count_computation = 0;
#pragma omp parallel for reduction(+ : tmp_count_computation)
        for (unsigned i = 0; i < L; i++) {
            unsigned v_id = init_ids[i];
            // Vertex record layout: precomputed norm first, then components.
            auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
            dataf norm = *v_data++;
            ++tmp_count_computation;
            distf dist = compute_distance_with_norm(v_data, query_data, norm);
            set_L[i + master_queue_start] = Candidate(v_id, dist, false); // False means not checked.
        }
        count_distance_computation_ += tmp_count_computation;
//        tmp_count_computation = 0;
        // Keep the master queue sorted by ascending distance.
        std::sort(
                set_L.begin() + master_queue_start,
                set_L.begin() + master_queue_start + L);
        local_queues_sizes[num_threads_ - 1] = L;
    } // Initialization Phase

    idi iter = 0; // for debug
    idi M = 1;
    idi k = 0; // Index of first unchecked candidate.

    // Sequential Phase: single-threaded top-M iterations on the master queue
    // while M grows towards value_M_middle.
    {
        uint64_t tmp_count_computation = 0;
        while (k < L && M < value_M_middle) {
            ++iter;
            subsearch_top_m_for_one_iteration(
                    iter,
                    k,
                    M,
                    query_id,
                    query_data,
                    L,
                    set_L,
                    master_queue_start,
                    local_queues_sizes[num_threads_ - 1],
                    top_m_candidates,
                    is_visited,
                    tmp_count_computation);
            count_distance_computation_ += tmp_count_computation;
            tmp_count_computation = 0;
            {// Double M
                if (M < value_M_max) {
                    M <<= 1U;
                }
            }
        }
    } // Sequential Phase

//    if (M < static_cast<idi>(num_threads_)) {
//        M = num_threads_;
//    }
    // Parallel Phase
    idi para_iter = 0;
    idi top_m_candidates_size = 0;
    idi last_k; // Position of the last candidate picked in this iteration.
    idi nk;     // Lowest master-queue position touched by new candidates.
    uint64_t tmp_count_computation = 0;
    while (true) {
//    while (k < L) {
        ++para_iter;
        ++iter;
        last_k = L;
        // Pick top-M: the first (up to) M unchecked candidates in the master
        // queue, marking each as checked.
        for (idi c_i = k; c_i < L && top_m_candidates_size < M; ++c_i) {
            idi index_set_L = c_i + master_queue_start;
            if (set_L[index_set_L].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[index_set_L].is_checked_ = true;
            top_m_candidates[top_m_candidates_size++] = set_L[index_set_L].id_;
        }
        if (!top_m_candidates_size) {
            // Nothing left to expand: the search has converged.
            break;
        }
        nk = L;
        // Expand
//#pragma omp parallel for reduction(+ : tmp_count_computation)
#pragma omp parallel reduction(+ : tmp_count_computation)
        {
            // `nowait`: threads fall through to the local-search block below
            // without waiting for the worksharing loop to finish everywhere.
#pragma omp for nowait
            for (idi c_i = 0; c_i < top_m_candidates_size; ++c_i) {
                int tid = omp_get_thread_num();
                idi local_queue_start = local_queues_starts[tid];
                idi &local_queue_size = local_queues_sizes[tid];
                idi cand_id = top_m_candidates[c_i];
                if (num_threads_ - 1 != tid) {
                    // Worker thread: results go into its bounded local queue.
                    expand_one_candidate(
                            cand_id,
                            query_data,
                            set_L[master_queue_start + L - 1].distance_,
                            set_L,
                            local_queue_start,
                            local_queue_size,
                            local_queue_capacity,
                            is_visited,
                            tmp_count_computation);
                } else {
                    // Master thread: inserts straight into the master queue
                    // (capacity L) and tracks the lowest touched position.
                    idi r = expand_one_candidate(
                            cand_id,
                            query_data,
                            set_L[master_queue_start + L - 1].distance_,
                            set_L,
                            local_queue_start,
                            local_queue_size,
                            L,
                            is_visited,
                            tmp_count_computation);
                    if (r < nk) {
                        nk = r;
                    }
                }
            } // Expand

            if (0 == (para_iter % full_merge_freq)) { // Local search iterations
                // Each thread keeps expanding unchecked candidates of its OWN
                // queue, at most local_iter_bound expansions.
                int q_i = omp_get_thread_num();
                idi local_queue_start = local_queues_starts[q_i];
                idi &local_queue_size = local_queues_sizes[q_i];
                const idi queue_capacity = (num_threads_ - 1 != q_i) ?
                                           local_queue_capacity : L;
                idi tmp_k;
                if (num_threads_ - 1 != q_i) {
                    tmp_k = 0;
                } else {
                    // Master thread resumes from the earliest position a new
                    // candidate could have been inserted at.
                    if (nk <= last_k) {
                        tmp_k = nk;
                    } else {
                        tmp_k = last_k + 1;
                    }
                }
//                if (tmp_k >= local_queue_size) {
//                    continue;
//                }
                idi i_t = 0; // Number of expansions done in this step.
                idi cand_id;
                while (tmp_k < local_queue_size) {
                    idi r;
                    if (!set_L[local_queue_start + tmp_k].is_checked_) {
                        set_L[local_queue_start + tmp_k].is_checked_ = true;
                        cand_id = set_L[local_queue_start + tmp_k].id_;
                        // Expand
                        r = expand_one_candidate(
                                cand_id,
                                query_data,
                                set_L[master_queue_start + L - 1].distance_,
                                set_L,
                                local_queue_start,
                                local_queue_size,
                                queue_capacity,
                                is_visited,
                                tmp_count_computation);
                        if (++i_t == local_iter_bound) {
                            break;
                        }
                    } else {
                        // Already checked: nothing inserted, keep scanning.
                        r = queue_capacity;
                    }
                    // Jump back if an insertion landed at or before tmp_k.
                    if (r <= tmp_k) {
                        tmp_k = r;
                    } else {
                        ++tmp_k;
                    }
                }
            } // Local Search
        } // OMP Parallel Construct

        top_m_candidates_size = 0; // Clear top_m_candidates
        count_distance_computation_ += tmp_count_computation;
        tmp_count_computation = 0;
//
        // Merge. Merge all queues in parallel.
        {
//            time_merge_ -= WallTimer::get_time_mark();
            if (num_threads_ > 1) {
                idi r = merge_all_queues_para_array(
                        set_L,
                        local_queues_sizes,
                        local_queue_capacity,
                        L);
                if (r < nk) {
                    nk = r;
                }
            }
        }
        // Next scan resumes from the earliest position new work appeared at.
        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
        {// Scale M
            if (M < value_M_max) {
                M <<= 1U;
            }
        }
    }

    // Output the ids of the K best candidates.
#pragma omp parallel for
    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i + master_queue_start].id_;
//        set_K[k_i] = set_L[k_i].id_;
    }

    {// Reset
//        std::fill(is_visited.begin(), is_visited.end(), 0);
        is_visited.reset();
//        is_visited.clear_all();
//        std::fill(local_queues_sizes.begin(), local_queues_sizes.end(), 0);
    }
//    {//test
//        if (14 == query_id) {
//            exit(1);
//        }
//    }
}

} // namespace PANNS

#endif //BATCH_SEARCHING_SEARCHING_H
is_initial_device.c
// RUN: %libomptarget-compile-run-and-check-x86_64-pc-linux-gnu
// RUN: %libomptarget-compile-x86_64-pc-linux-gnu -DUNUSED -Wall -Werror

#include <omp.h>
#include <stdio.h>

int main() {
  int num_failures = 0;

#ifdef UNUSED
  // Test if it is OK to leave the variants unused in the header
#else // UNUSED
  // On the host, omp_is_initial_device() must report true.
  const int on_host = omp_is_initial_device();

  // Inside a target region it must report false; start from a failing value
  // so a region that never runs is also caught.
  int on_device = 1;
#pragma omp target map(tofrom : on_device)
  { on_device = omp_is_initial_device(); }

  if (!on_host) {
    printf("omp_is_initial_device() returned false on host\n");
    ++num_failures;
  }
  if (on_device) {
    printf("omp_is_initial_device() returned true on device\n");
    ++num_failures;
  }
#endif // UNUSED

  // CHECK: PASS
  printf("%s\n", num_failures ? "FAIL" : "PASS");
  return num_failures;
}
gemv_c_bsr_conj.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
/* <stdlib.h> (malloc/free) and <string.h> (memset) are used unconditionally,
 * so they must be included outside the _OPENMP guard.  Previously <string.h>
 * was only visible when _OPENMP was defined, making the non-OpenMP build rely
 * on an implicit declaration of memset. */
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif

/**
 * @brief y := beta * y + alpha * op(conj(A)) * x for a BSR matrix A.
 *
 * The kernel walks the stored rows of A and scatters
 *   y[bs * col + bc] += conj(A_block[br][bc]) * x[bs * row + br]
 * so the output has A->cols * bs entries; i.e. it applies the conjugated
 * matrix "column-wise" (NOTE(review): this matches a conjugate-transpose
 * apply of the stored layout — confirm against the library's definition of
 * the "conj" operation).
 *
 * Parallel strategy: rows are partitioned across threads by nonzero count;
 * each thread accumulates into a private dense buffer tmp[tid] of length
 * A->cols * bs (avoiding write conflicts on y), and the per-thread buffers
 * are reduced into y afterwards.
 *
 * @param alpha scalar multiplier applied to op(conj(A)) * x.
 * @param A     input sparse matrix in BSR format.
 * @param x     input vector of length A->rows * block_size.
 * @param beta  scalar multiplier applied to the incoming y.
 * @param y     in/out vector of length A->cols * block_size.
 * @return ALPHA_SPARSE_STATUS_SUCCESS.
 */
static alphasparse_status_t
gemv_bsr_conj_omp(const ALPHA_Number alpha,
                  const ALPHA_SPMAT_BSR* A,
                  const ALPHA_Number* x,
                  const ALPHA_Number beta,
                  ALPHA_Number* y)
{
    ALPHA_INT bs = A->block_size;
    ALPHA_INT bs2 = bs * bs;
    ALPHA_INT m_inner = A->rows;
    ALPHA_INT n_inner = A->cols;
    const ALPHA_INT thread_num = alpha_get_thread_num();

    // Balance the row ranges so each thread handles a similar nnz count.
    ALPHA_INT partition[thread_num + 1];
    balanced_partition_row_by_nnz(A->rows_end, m_inner, thread_num, partition);

    // One private accumulation buffer per thread.
    // NOTE(review): malloc results are not checked here (pre-existing
    // behavior); on allocation failure this dereferences NULL.
    ALPHA_Number** tmp = (ALPHA_Number**)malloc(sizeof(ALPHA_Number*) * thread_num);

#ifdef _OPENMP
#pragma omp parallel num_threads(thread_num)
#endif
    {
        const ALPHA_INT tid = alpha_get_thread_id();
        const ALPHA_INT local_m_s = partition[tid];
        const ALPHA_INT local_m_e = partition[tid + 1];
        tmp[tid] = (ALPHA_Number*)malloc(sizeof(ALPHA_Number) * n_inner * bs);
        memset(tmp[tid], 0, sizeof(ALPHA_Number) * n_inner * bs);

        if (A->block_layout == ALPHA_SPARSE_LAYOUT_ROW_MAJOR) {
            for (ALPHA_INT i = local_m_s; i < local_m_e; i++) {
                for (ALPHA_INT ai = A->rows_start[i]; ai < A->rows_end[i]; ai++) {
                    // Block entry (block_row, block_col) of block ai lives at
                    // values[ai*bs2 + block_row*bs + block_col]; it multiplies
                    // x[bs*i + block_row] and accumulates into column position
                    // bs*col_indx[ai] + block_col.
                    for (ALPHA_INT block_row = 0; block_row < bs; block_row++) {
                        for (ALPHA_INT block_col = 0; block_col < bs; block_col++) {
                            ALPHA_Number cv = A->values[ai * bs2 + block_col + block_row * bs];
                            alpha_conj(cv, cv);
                            alpha_madde(tmp[tid][bs * A->col_indx[ai] + block_col],
                                        cv, x[bs * i + block_row]);
                        }
                    }
                }
            }
        } else if (A->block_layout == ALPHA_SPARSE_LAYOUT_COLUMN_MAJOR) {
            for (ALPHA_INT i = local_m_s; i < local_m_e; i++) {
                for (ALPHA_INT ai = A->rows_start[i]; ai < A->rows_end[i]; ai++) {
                    // Same accumulation as above, but the block is stored
                    // column-major: entry (block_row, block_col) lives at
                    // values[ai*bs2 + block_col*bs + block_row].
                    for (ALPHA_INT block_col = 0; block_col < bs; block_col++) {
                        for (ALPHA_INT block_row = 0; block_row < bs; block_row++) {
                            ALPHA_Number cv = A->values[ai * bs2 + block_col * bs + block_row];
                            alpha_conj(cv, cv);
                            alpha_madde(tmp[tid][bs * A->col_indx[ai] + block_col],
                                        cv, x[bs * i + block_row]);
                        }
                    }
                }
            }
        }
    }

    // Reduce the per-thread buffers into y: y[i] = beta*y[i] + alpha*sum.
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < n_inner * bs; ++i) {
        ALPHA_Number tmp_y;
        alpha_setzero(tmp_y);
        for (ALPHA_INT j = 0; j < thread_num; ++j) {
            alpha_add(tmp_y, tmp_y, tmp[j][i]);
        }
        alpha_mul(y[i], y[i], beta);
        alpha_madde(y[i], tmp_y, alpha);
    }

    // Release the scratch buffers (serially: parallelizing a handful of
    // free() calls, as the original did, only adds thread-spawn overhead).
    for (ALPHA_INT i = 0; i < thread_num; ++i) {
        free(tmp[i]);
    }
    free(tmp);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}

/**
 * @brief Public entry point (name generated via the ONAME macro); simply
 * forwards to the OpenMP conjugate BSR gemv kernel above.
 */
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
      const ALPHA_SPMAT_BSR* A,
      const ALPHA_Number* x,
      const ALPHA_Number beta,
      ALPHA_Number* y)
{
    return gemv_bsr_conj_omp(alpha, A, x, beta, y);
}
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/LangOptions.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> 
#include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; class InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class AttributeList; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class 
ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OverloadCandidate; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. 
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; ///\brief Source of additional semantic information. ExternalSemaSource *ExternalSource; ///\brief Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { // We are about to link these. It is now safe to compute the linkage of // the new decl. 
If the new decl has external linkage, we will // link it with the hidden decl (which also has external linkage) and // it will keep having external linkage. If it has internal linkage, we // will not link it. Since it has no previous decls, it will remain // with internal linkage. return isVisible(Old) || New->isExternallyVisible(); } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// \brief Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// \brief Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// \brief Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// \brief Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. 
SmallVector<Scope*, 2> CurrentSEHFinally; /// \brief Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// \brief pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. 
void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// \brief Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispAttr::Mode> VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack<unsigned> PackStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. 
void *VisContext; // Really a "PragmaVisStack*" /// \brief This represents the stack of attributes that were pushed by /// \#pragma clang attribute. struct PragmaAttributeEntry { SourceLocation Loc; AttributeList *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; SmallVector<PragmaAttributeEntry, 2> PragmaAttributeStack; /// \brief The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// \brief This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// \brief Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// \brief Store a list of either DeclRefExprs or MemberExprs /// that contain a reference to a variable (constant) that may or may not /// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue /// and discarded value conversions have been applied to all subexpressions /// of the enclosing full expression. This is cleared at the end of each /// full expression. llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs; /// \brief Stack containing information about each of the nested /// function, block, and method scopes that are currently active. /// /// This array is never empty. 
Clients should ignore the first /// element, which is used to cache a single FunctionScopeInfo /// that's used to parse every top-level function. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType; /// \brief Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// \brief Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// \brief Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. 
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// \brief Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// \brief All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// \brief The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// \brief All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// \brief All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedExceptionSpecChecks; /// \brief All the members seen during a class definition which were both /// explicitly defaulted and had explicitly-specified exception /// specifications, along with the function type containing their /// user-specified exception specification. Those exception specifications /// were overridden with the default specifications, but we still need to /// check whether they are compatible with the default specification, and /// we can't do that until the nesting set of class definitions is complete. 
SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2> DelayedDefaultedMemberExceptionSpecs; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// \brief Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// \brief The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. 
/// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// \brief RAII object to handle the state changes required to synthesize /// a function body. 
/// RAII helper: pushes a function scope and a potentially-evaluated
/// expression context for the duration of body synthesis, and marks the
/// function as "will have body" so it is not diagnosed as undefined.
class SynthesizedFunctionScope {
  Sema &S;
  Sema::ContextRAII SavedContext;
  bool PushedCodeSynthesisContext = false;

public:
  SynthesizedFunctionScope(Sema &S, DeclContext *DC)
      : S(S), SavedContext(S, DC) {
    S.PushFunctionScope();
    S.PushExpressionEvaluationContext(
        Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
    if (auto *FD = dyn_cast<FunctionDecl>(DC))
      FD->setWillHaveBody(true);
    else
      assert(isa<ObjCMethodDecl>(DC));
  }

  /// Push a DefiningSynthesizedFunction code-synthesis context so that
  /// diagnostics note the use site \p UseLoc.  May be called at most once.
  void addContextNote(SourceLocation UseLoc) {
    assert(!PushedCodeSynthesisContext);

    Sema::CodeSynthesisContext Ctx;
    Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
    Ctx.PointOfInstantiation = UseLoc;
    Ctx.Entity = cast<Decl>(S.CurContext);
    S.pushCodeSynthesisContext(Ctx);

    PushedCodeSynthesisContext = true;
  }

  ~SynthesizedFunctionScope() {
    if (PushedCodeSynthesisContext)
      S.popCodeSynthesisContext();
    if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
      FD->setWillHaveBody(false);
    S.PopExpressionEvaluationContext();
    S.PopFunctionScopeInfo();
  }
};

/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before declared. rare. may alias another
/// identifier, declared or undeclared
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;

/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared.  Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;

/// \brief Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();

/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// \brief The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// \brief The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// \brief The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// \brief The C++ "std::experimental" namespace, where the experimental parts /// of the standard library resides. NamespaceDecl *StdExperimentalNamespaceCache; /// \brief The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// \brief The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// \brief Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// \brief The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// \brief The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// \brief Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// \brief Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// \brief The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// \brief The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// \brief Pointer to NSString type (NSString *). QualType NSStringPointer; /// \brief The declaration of the stringWithUTF8String: method. 
ObjCMethodDecl *StringWithUTF8StringMethod;

/// \brief The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;

/// \brief The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;

/// \brief The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;

/// \brief The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;

/// \brief The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;

/// \brief id<NSCopying> type.
QualType QIDNSCopying;

/// \brief will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;

/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;

/// A flag to indicate that we're in a context that permits abstract
/// references to fields.
bool AllowAbstractFieldReference;

/// \brief Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
  /// \brief The current expression and its subexpressions occur within an
  /// unevaluated operand (C++11 [expr]p7), such as the subexpression of
  /// \c sizeof, where the type of the expression may be significant but
  /// no code will be generated to evaluate the value of the expression at
  /// run time.
  Unevaluated,

  /// \brief The current expression occurs within a braced-init-list within
  /// an unevaluated operand. This is mostly like a regular unevaluated
  /// context, except that we still instantiate constexpr functions that are
  /// referenced here so that we can perform narrowing checks correctly.
  UnevaluatedList,

  /// \brief The current expression occurs within a discarded statement.
  /// This behaves largely similarly to an unevaluated operand in preventing
  /// definitions from being required, but not in other ways.
  DiscardedStatement,

  /// \brief The current expression occurs within an unevaluated
  /// operand that unconditionally permits abstract references to
  /// fields, such as a SIZE operator in MS-style inline assembly.
  UnevaluatedAbstract,

  /// \brief The current context is "potentially evaluated" in C++11 terms,
  /// but the expression is evaluated at compile-time (like the values of
  /// cases in a switch statement).
  ConstantEvaluated,

  /// \brief The current expression is potentially evaluated at run time,
  /// which means that code may be generated to evaluate the value of the
  /// expression at run time.
  PotentiallyEvaluated,

  /// \brief The current expression is potentially evaluated, but any
  /// declarations referenced inside that expression are only used if
  /// in fact the current expression is used.
  ///
  /// This value is used when parsing default function arguments, for which
  /// we would like to provide diagnostics (e.g., passing non-POD arguments
  /// through varargs) but do not want to mark declarations as "referenced"
  /// until the default argument is used.
  PotentiallyEvaluatedIfUsed
};

/// \brief Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
  /// \brief The expression evaluation context.
  ExpressionEvaluationContext Context;

  /// \brief Whether the enclosing context needed a cleanup.
  CleanupInfo ParentCleanup;

  /// \brief Whether we are in a decltype expression.
  bool IsDecltype;

  /// \brief The number of active cleanup objects when we entered
  /// this expression evaluation context.
  unsigned NumCleanupObjects;

  /// \brief The number of typos encountered during this expression evaluation
  /// context (i.e. the number of TypoExprs created).
  unsigned NumTypos;

  // NOTE(review): presumably holds the enclosing context's
  // maybe-odr-used expressions, restored on pop — confirm against the
  // push/pop implementation in SemaExpr.
  llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs;

  /// \brief The lambdas that are present within this context, if it
  /// is indeed an unevaluated context.
  SmallVector<LambdaExpr *, 2> Lambdas;

  /// \brief The declaration that provides context for lambda expressions
  /// and block literals if the normal declaration context does not
  /// suffice, e.g., in a default function argument.
  Decl *ManglingContextDecl;

  /// \brief The context information used to mangle lambda expressions
  /// and block literals within this context.
  ///
  /// This mangling information is allocated lazily, since most contexts
  /// do not have lambda expressions or block literals.
  std::unique_ptr<MangleNumberingContext> MangleNumbering;

  /// \brief If we are processing a decltype type, a set of call expressions
  /// for which we have deferred checking the completeness of the return type.
  SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

  /// \brief If we are processing a decltype type, a set of temporary binding
  /// expressions for which we have deferred checking the destructor.
  SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

  ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                    unsigned NumCleanupObjects,
                                    CleanupInfo ParentCleanup,
                                    Decl *ManglingContextDecl,
                                    bool IsDecltype)
    : Context(Context), ParentCleanup(ParentCleanup),
      IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects),
      NumTypos(0),
      ManglingContextDecl(ManglingContextDecl), MangleNumbering() { }

  /// \brief Retrieve the mangling numbering context, used to consistently
  /// number constructs like lambdas for mangling.
  MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);

  bool isUnevaluated() const {
    return Context == ExpressionEvaluationContext::Unevaluated ||
           Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
           Context == ExpressionEvaluationContext::UnevaluatedList;
  }
  bool isConstantEvaluated() const {
    return Context == ExpressionEvaluationContext::ConstantEvaluated;
  }
};

/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;

/// \brief Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
    const DeclContext *DC, Decl *&ManglingContextDecl);

/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
  enum Kind {
    NoMemberOrDeleted,
    Ambiguous,
    Success
  };

private:
  // Method pointer plus a 2-bit Kind packed into the low pointer bits.
  llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

public:
  SpecialMemberOverloadResult() : Pair() {}
  SpecialMemberOverloadResult(CXXMethodDecl *MD)
      : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}

  CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
  void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

  Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
  void setKind(Kind K) { Pair.setInt(K); }
};

/// A SpecialMemberOverloadResult that can live in a FoldingSet, keyed by
/// a precomputed FoldingSetNodeID.
class SpecialMemberOverloadResultEntry
    : public llvm::FastFoldingSetNode,
      public SpecialMemberOverloadResult {
public:
  SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
    : FastFoldingSetNode(ID)
  {}
};

/// \brief A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;

/// \brief A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;

/// \brief The kind of translation unit we are processing.
/// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// \brief The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// \brief A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". 
/// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef std::pair<CXXRecordDecl*, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. We want to keep track of them /// to ensure that we don't emit a "redefinition" error if we encounter a /// correctly named definition after the renamed definition. llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions; /// Stack of types that correspond to the parameter entities that are /// currently being copy-initialized. Can be empty. llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes; void ReadMethodPool(Selector Sel); void updateOutOfDateSelector(Selector Sel); /// Private Helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// \brief Cause the active diagnostic on the DiagosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere. 
void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the FP_CONTRACT state on entry/exit of compound /// statements. class FPContractStateRAII { public: FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {} ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; } private: Sema& S; FPOptions OldFPFeaturesState; }; void addImplicitTypedef(StringRef Name, QualType T); public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// \brief Perform initialization that occurs after the parser has been /// initialized but before it parses anything. void Initialize(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getFPOptions() { return FPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } ///\brief Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// ///\param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// \brief Helper class that creates diagnostics with optional /// template instantiation stacks. /// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics. 
/// SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
  Sema &SemaRef;
  unsigned DiagID;

public:
  SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
    : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }

  // This is a cunning lie. DiagnosticBuilder actually performs move
  // construction in its copy constructor (but due to varied uses, it's not
  // possible to conveniently express this as actual move construction). So
  // the default copy ctor here is fine, because the base class disables the
  // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
  // in that case anyway.
  SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;

  ~SemaDiagnosticBuilder() {
    // If we aren't active, there is nothing to do.
    if (!isActive()) return;

    // Otherwise, we need to emit the diagnostic. First flush the underlying
    // DiagnosticBuilder data, and clear the diagnostic builder itself so it
    // won't emit the diagnostic in its own destructor.
    //
    // This seems wasteful, in that as written the DiagnosticBuilder dtor will
    // do its own needless checks to see if the diagnostic needs to be
    // emitted. However, because we take care to ensure that the builder
    // objects never escape, a sufficiently smart compiler will be able to
    // eliminate that code.
    FlushCounts();
    Clear();

    // Dispatch to Sema to emit the diagnostic.
    SemaRef.EmitCurrentDiagnostic(DiagID);
  }

  /// Teach operator<< to produce an object of the correct type.
  template<typename T>
  friend const SemaDiagnosticBuilder &operator<<(
      const SemaDiagnosticBuilder &Diag, const T &Value) {
    const DiagnosticBuilder &BaseDiag = Diag;
    BaseDiag << Value;
    return Diag;
  }
};

/// \brief Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
  DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
  return SemaDiagnosticBuilder(DB, *this, DiagID);
}

/// \brief Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);

/// \brief Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

bool findMacroSpelling(SourceLocation &loc, StringRef name);

/// \brief Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

/// \brief Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);

/// \brief Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;

void emitAndClearUnusedLocalTypedefWarnings();

void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();

void CheckDelegatingCtorCycles();

Scope *getScopeForContext(DeclContext *Ctx);

void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();

/// \brief This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing.  Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);

void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
                             RecordDecl *RD,
                             CapturedRegionKind K);
void
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
                     const Decl *D = nullptr,
                     const BlockExpr *blkExpr = nullptr);

/// Retrieve the innermost function scope.  FunctionScopes must be
/// non-empty (back() on an empty vector is undefined behavior).
sema::FunctionScopeInfo *getCurFunction() const {
  return FunctionScopes.back();
}

/// Retrieve the innermost enclosing function scope, skipping over any
/// intervening block scopes; returns null if there is none.
sema::FunctionScopeInfo *getEnclosingFunction() const {
  if (FunctionScopes.empty())
    return nullptr;

  for (int e = FunctionScopes.size()-1; e >= 0; --e) {
    if (isa<sema::BlockScopeInfo>(FunctionScopes[e]))
      continue;
    return FunctionScopes[e];
  }
  return nullptr;
}

/// Record a use of a weak entity \p E in the current function scope,
/// unless we are in an unevaluated context.
template <typename ExprT>
void recordUseOfEvaluatedWeak(const ExprT *E, bool IsRead=true) {
  if (!isUnevaluatedContext())
    getCurFunction()->recordUseOfWeak(E, IsRead);
}

void PushCompoundScope();
void PopCompoundScope();

sema::CompoundScopeInfo &getCurCompoundScope() const;

bool hasAnyUnrecoverableErrorsInThisFunction() const;

/// \brief Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();

/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);

/// \brief Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();

/// \brief Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();

/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }

void ActOnComment(SourceRange Comment);

//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
// QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// \brief Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. 
QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T, TypeSourceInfo *ReturnTypeInfo); /// \brief Package the given type and TSI into a ParsedType. ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const 
                                  PartialDiagnostic &NestedDiagID,
                              const PartialDiagnostic &NoteID,
                              const FunctionProtoType *Superset,
                              SourceLocation SuperLoc,
                              const FunctionProtoType *Subset,
                              SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
                             const PartialDiagnostic &NoteID,
                             const FunctionProtoType *Target,
                             SourceLocation TargetLoc,
                             const FunctionProtoType *Source,
                             SourceLocation SourceLoc);

TypeResult ActOnTypeName(Scope *S, Declarator &D);

/// \brief The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);

/// \brief Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
  TypeDiagnoser() {}

  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
  virtual ~TypeDiagnoser() {}
};

// Identity helpers that normalize heterogeneous diagnostic arguments
// into something DiagnosticBuilder's operator<< accepts.
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
  return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}

/// A TypeDiagnoser that binds a diagnostic ID together with extra
/// arguments to stream into the diagnostic before the offending type.
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
  unsigned DiagID;
  std::tuple<const Ts &...> Args;

  template <std::size_t... Is>
  void emit(const SemaDiagnosticBuilder &DB,
            llvm::index_sequence<Is...>) const {
    // Apply all tuple elements to the builder in order.
    bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
    (void)Dummy;
  }

public:
  BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
      : TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
    assert(DiagID != 0 && "no diagnostic for type diagnoser");
  }

  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
    emit(DB, llvm::index_sequence_for<Ts...>());
    DB << T;
  }
};

private:
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
                             TypeDiagnoser *Diagnoser);

/// A module that we are currently parsing, together with the set of
/// modules that were visible when we entered it.
struct ModuleScope {
  clang::Module *Module;
  VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;

/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
  return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}

VisibleModuleSet VisibleModules;

public:
/// \brief Get the module owning an entity.
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }

/// \brief Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);

bool isModuleVisible(Module *M) { return VisibleModules.isVisible(M); }

/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
  return !D->isHidden() || isVisibleSlow(D);
}

/// Determine whether any declaration of an entity is visible.
bool hasVisibleDeclaration(const NamedDecl *D,
                           llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
  return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
                               llvm::SmallVectorImpl<Module *> *Modules);

bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);

/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);

  /// Determine if \p D has a visible definition. If not, suggest a declaration
  /// that should be made visible to expose the definition.
  bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
                            bool OnlyNeedComplete = false);
  // Convenience overload when the caller does not care which hidden
  // declaration would have to be made visible.
  bool hasVisibleDefinition(const NamedDecl *D) {
    NamedDecl *Hidden;
    return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
  }

  /// Determine if the template parameter \p D has a visible default argument.
  bool hasVisibleDefaultArgument(const NamedDecl *D,
                                 llvm::SmallVectorImpl<Module *> *Modules = nullptr);

  /// Determine if there is a visible declaration of \p D that is an explicit
  /// specialization declaration for a specialization of a template. (For a
  /// member specialization, use hasVisibleMemberSpecialization.)
  bool hasVisibleExplicitSpecialization(
      const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

  /// Determine if there is a visible declaration of \p D that is a member
  /// specialization declaration (as opposed to an instantiated declaration).
  bool hasVisibleMemberSpecialization(
      const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

  /// Determine if \p A and \p B are equivalent internal linkage declarations
  /// from different modules, and thus an ambiguity error can be downgraded to
  /// an extension warning.
  bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
                                              const NamedDecl *B);
  void diagnoseEquivalentInternalLinkageDeclarations(
      SourceLocation Loc, const NamedDecl *D,
      ArrayRef<const NamedDecl *> Equiv);

  // True iff \p T is complete at \p Loc; never emits a diagnostic (the null
  // diagnoser suppresses them).
  bool isCompleteType(SourceLocation Loc, QualType T) {
    return !RequireCompleteTypeImpl(Loc, T, nullptr);
  }

  // Require that \p T be complete at \p Loc, diagnosing via the supplied
  // diagnoser or diagnostic ID if it is not.
  bool RequireCompleteType(SourceLocation Loc, QualType T,
                           TypeDiagnoser &Diagnoser);
  bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID);

  // Variadic convenience form: bundles \p DiagID and \p Args into a
  // BoundTypeDiagnoser and delegates to the overload above.
  template <typename... Ts>
  bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
                           const Ts &...Args) {
    BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
    return RequireCompleteType(Loc, T, Diagnoser);
  }

  void completeExprArrayBound(Expr *E);

  // As RequireCompleteType, but applied to the type of an expression.
  bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
  bool RequireCompleteExprType(Expr *E, unsigned DiagID);

  template <typename... Ts>
  bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
    BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
    return RequireCompleteExprType(E, Diagnoser);
  }

  // Require that \p T be a literal type, diagnosing if it is not.
  bool RequireLiteralType(SourceLocation Loc, QualType T,
                          TypeDiagnoser &Diagnoser);
  bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);

  template <typename... Ts>
  bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
                          const Ts &...Args) {
    BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
    return RequireLiteralType(Loc, T, Diagnoser);
  }

  QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
                             const CXXScopeSpec &SS, QualType T);

  QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
  /// If AsUnevaluated is false, E is treated as though it were an evaluated
  /// context, such as when building a type for decltype(auto).
  QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
                             bool AsUnevaluated = true);
  QualType BuildUnaryTransformType(QualType BaseType,
                                   UnaryTransformType::UTTKind UKind,
                                   SourceLocation Loc);

  //===--------------------------------------------------------------------===//
  // Symbol table / Decl tracking callbacks: SemaDecl.cpp.
// struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// \brief Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). 
enum NameClassificationKind { NC_Unknown, NC_Error, NC_Keyword, NC_Type, NC_Expression, NC_NestedNameSpecifier, NC_TypeTemplate, NC_VarTemplate, NC_FunctionTemplate }; class NameClassification { NameClassificationKind Kind; ExprResult Expr; TemplateName Template; ParsedType Type; const IdentifierInfo *Keyword; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {} NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword), Keyword(Keyword) { } static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification NestedNameSpecifier() { return NameClassification(NC_NestedNameSpecifier); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } ExprResult getExpression() const { assert(Kind == NC_Expression); return Expr; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; default: llvm_unreachable("unsupported name classification."); } } }; /// \brief Perform name lookup on the 
given name, classifying it based on
  /// the results of name lookup and the following token.
  ///
  /// This routine is used by the parser to resolve identifiers and help direct
  /// parsing. When the identifier cannot be found, this routine will attempt
  /// to correct the typo and classify based on the resulting name.
  ///
  /// \param S The scope in which we're performing name lookup.
  ///
  /// \param SS The nested-name-specifier that precedes the name.
  ///
  /// \param Name The identifier. If typo correction finds an alternative name,
  /// this pointer parameter will be updated accordingly.
  ///
  /// \param NameLoc The location of the identifier.
  ///
  /// \param NextToken The token following the identifier. Used to help
  /// disambiguate the name.
  ///
  /// \param IsAddressOfOperand True if this name is the operand of a unary
  /// address of ('&') expression, assuming it is classified as an
  /// expression.
  ///
  /// \param CCC The correction callback, if typo correction is desired.
  NameClassification
  ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name,
               SourceLocation NameLoc, const Token &NextToken,
               bool IsAddressOfOperand,
               std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);

  /// Describes the detailed kind of a template name. Used in diagnostics.
  enum class TemplateNameKindForDiagnostics {
    ClassTemplate,
    FunctionTemplate,
    VarTemplate,
    AliasTemplate,
    TemplateTemplateParam,
    DependentTemplate
  };
  TemplateNameKindForDiagnostics
  getTemplateNameKindForDiagnostics(TemplateName Name);

  /// Determine whether it's plausible that E was intended to be a
  /// template-name.
  bool mightBeIntendedToBeTemplateName(ExprResult E) {
    // Only meaningful in C++, and only for a valid expression.
    if (!getLangOpts().CPlusPlus || E.isInvalid())
      return false;
    // A bare decl/member reference without explicit template arguments could
    // plausibly have been meant as a template-name.
    if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
      return !DRE->hasExplicitTemplateArgs();
    if (auto *ME = dyn_cast<MemberExpr>(E.get()))
      return !ME->hasExplicitTemplateArgs();
    // Any additional cases recognized here should also be handled by
    // diagnoseExprIntendedAsTemplateName.
    return false;
  }
  void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
                                          SourceLocation Less,
                                          SourceLocation Greater);

  Decl *ActOnDeclarator(Scope *S, Declarator &D);

  NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
                              MultiTemplateParamsArg TemplateParameterLists);
  void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
  bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
  bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
                                    DeclarationName Name, SourceLocation Loc);
  // Warn about qualifiers ignored in the given position; the individual
  // qualifier locations, when known, sharpen the diagnostic ranges
  // (FallbackLoc is used otherwise).
  void
  diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
                            SourceLocation FallbackLoc,
                            SourceLocation ConstQualLoc = SourceLocation(),
                            SourceLocation VolatileQualLoc = SourceLocation(),
                            SourceLocation RestrictQualLoc = SourceLocation(),
                            SourceLocation AtomicQualLoc = SourceLocation(),
                            SourceLocation UnalignedQualLoc = SourceLocation());

  static bool adjustContextForLocalExternDecl(DeclContext *&DC);
  void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
  // Shadowing diagnostics for typedefs and variables.
  NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
                                    const LookupResult &R);
  NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
  void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
                   const LookupResult &R);
  void CheckShadow(Scope *S, VarDecl *D);

  /// Warn if 'E', which is an expression that is about to be modified, refers
  /// to a shadowing declaration.
  void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);

  void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);

private:
  /// Map of current shadowing declarations to shadowed declarations. Warn if
  /// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); bool CheckConstexprFunctionDecl(const FunctionDecl *FD); bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, 
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. 
void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// \brief Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// \brief Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). 
bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// \brief Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// \brief Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// \brief Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, AttributeList *AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Module, ///< 'module X;' Partition, ///< 'module partition X;' Implementation, ///< 'module implementation X;' }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path); /// \brief The parser has processed a module import declaration. /// /// \param AtLoc The location of the '@' symbol, if any. /// /// \param ImportLoc The location of the 'import' keyword. /// /// \param Path The module access path. 
DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc, ModuleIdPath Path); /// \brief The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// \brief The parsed has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// \brief The parser has left a submodule. void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// \brief Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument, ExplicitSpecialization, PartialSpecialization }; /// \brief Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem. void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// \brief We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. 
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// \brief We've found a use of a template specialization that would select a /// partial specialization. Check that the partial specialization is visible, /// and diagnose if not. void checkPartialSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// \brief Retrieve a suitable printing policy. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// \brief Retrieve a suitable printing policy. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. 
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, AttributeList *MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, 
TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, bool Diagnose = false); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD); void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope* S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, AttributeList *AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// \brief Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. 
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// \brief Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool EnumUnderlyingIsImplicit, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, AttributeList *Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, AttributeList *Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped. 
void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// \brief Make the given externally-produced declaration visible at the /// top level scope. /// /// \param D The externally-produced declaration to push. /// /// \param Name The name of the externally-produced declaration. void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. 
/// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// \brief Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// \brief Don't merge availability attributes at all. AMK_None, /// \brief Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// \brief Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override, /// \brief Merge availability attributes for an implementation of /// a protocol requirement. AMK_ProtocolImplementation, }; /// Attribute merging methods. Return true if a new attribute was added. 
AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, unsigned AttrSpellingListIndex); TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range, TypeVisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range, VisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex, StringRef Uuid); DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); MSInheritanceAttr * mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase, unsigned AttrSpellingListIndex, MSInheritanceAttr::Spelling SemanticSpelling); FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range, IdentifierInfo *Format, int FormatIdx, int FirstArg, unsigned AttrSpellingListIndex); SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); CommonAttr *mergeCommonAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope 
*S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true); /// \brief Checks availability of the function depending on the current /// function context.Inside an unavailable function,unavailability is ignored. /// /// \returns true if \p FD is unavailable and current context is inside /// an available function, false otherwise. 
bool isFunctionConsideredUnavailable(FunctionDecl *FD); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult 
PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator. CCEK_ConstexprIf ///< Condition in a constexpr if statement. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// \brief Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// \brief Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// \brief Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. 
virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// \brief Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// \brief Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// \brief Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// \brief Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// \brief Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. 
virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. ExprResult PerformContextualImplicitConversion( SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter); enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error }; ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE); // Note that LK_String is intentionally after the other literals, as // this is used for diagnostics logic. enum ObjCLiteralKind { LK_Array, LK_Dictionary, LK_Numeric, LK_Boxed, LK_String, LK_Block, LK_None }; ObjCLiteralKind CheckLiteralKind(Expr *FromE); ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member); // Members have to be NamespaceDecl* or TranslationUnitDecl*. // TODO: make this is a typesafe union. typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = false, ConversionSequenceList EarlyConversions = None); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool 
SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}); void AddConversionCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet& CandidateSet, bool AllowObjCConversionOnExplicit); void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange = SourceRange()); void AddBuiltinCandidate(QualType 
*ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn, QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. 
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
                                           SourceLocation Loc);

/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
                                       bool Complain = false,
                                       SourceLocation Loc = SourceLocation());

// Strips pointer/reference/member-pointer wrapping down to the bare
// function type, per the mapping below; non-function types pass through.
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);

FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType,
                                   bool Complain, DeclAccessPair &Found,
                                   bool *pHadMultipleCandidates = nullptr);

FunctionDecl *
resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
                                            DeclAccessPair &FoundResult);

bool resolveAndFixAddressOfOnlyViableOverloadCandidate(
    ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);

FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
                                            bool Complain = false,
                                            DeclAccessPair *Found = nullptr);

// NOTE(review): "DoFunctionPointerConverion" is misspelled ("Conversion").
// The parameter name is declaration-only, so a rename would not affect
// callers, but it should be coordinated with the out-of-line definition.
bool ResolveAndFixSingleFunctionTemplateSpecialization(
    ExprResult &SrcExpr, bool DoFunctionPointerConverion = false,
    bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(),
    QualType DestTypeForComplaining = QualType(),
    unsigned DiagIDForComplaining = 0);

Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl,
                                     FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl,
                                          FunctionDecl *Fn);

void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
                                 ArrayRef<Expr *> Args,
                                 OverloadCandidateSet &CandidateSet,
                                 bool PartialOverloading = false);

// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
  FRS_Success,
  FRS_NoViableFunction,
  FRS_DiagnosticIssued
};

ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
                                         SourceLocation RangeLoc,
                                         const DeclarationNameInfo &NameInfo,
                                         LookupResult &MemberLookup,
                                         OverloadCandidateSet *CandidateSet,
                                         Expr *Range, ExprResult *CallExpr);

// Entry points for building calls and operator expressions that require
// overload resolution.
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
                                   UnresolvedLookupExpr *ULE,
                                   SourceLocation LParenLoc,
                                   MultiExprArg Args,
                                   SourceLocation RParenLoc,
                                   Expr *ExecConfig,
                                   bool AllowTypoCorrection = true,
                                   bool CalleesAddressIsTaken = false);

bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
                            MultiExprArg Args, SourceLocation RParenLoc,
                            OverloadCandidateSet *CandidateSet,
                            ExprResult *Result);

ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
                                   const UnresolvedSetImpl &Fns, Expr *input);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                                 const UnresolvedSetImpl &Fns,
                                 Expr *LHS, Expr *RHS);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
                                              SourceLocation RLoc,
                                              Expr *Base, Expr *Idx);
ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
                                     SourceLocation LParenLoc,
                                     MultiExprArg Args,
                                     SourceLocation RParenLoc);
ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object,
                                        SourceLocation LParenLoc,
                                        MultiExprArg Args,
                                        SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc,
                                    bool *NoArrowOperatorFound = nullptr);

/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
                         CallExpr *CE, FunctionDecl *FD);

/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// @brief Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. 
LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// \brief Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// \brief Look up any declaration with any name. LookupAnyName }; /// \brief Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). 
enum RedeclarationKind {
  /// \brief The lookup is a reference to this name that is not for the
  /// purpose of redeclaring the name.
  NotForRedeclaration = 0,
  /// \brief The lookup results will be used for redeclaration of a name,
  /// if an entity by that name already exists.
  ForRedeclaration
};

/// \brief The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
  /// \brief The lookup resulted in an error.
  LOLR_Error,
  /// \brief The lookup found a single 'cooked' literal operator, which
  /// expects a normal literal to be built and passed to it.
  LOLR_Cooked,
  /// \brief The lookup found a single 'raw' literal operator, which expects
  /// a string literal containing the spelling of the literal token.
  LOLR_Raw,
  /// \brief The lookup found an overload set of literal operator templates,
  /// which expect the characters of the spelling of the literal token to be
  /// passed as a non-type template argument pack.
  LOLR_Template,
  /// \brief The lookup found an overload set of literal operator templates,
  /// which expect the character type and characters of the spelling of the
  /// string literal token to be passed as template arguments.
  LOLR_StringTemplate
};

SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
                                                CXXSpecialMember SM,
                                                bool ConstArg,
                                                bool VolatileArg,
                                                bool RValueThis, bool ConstThis,
                                                bool VolatileThis);

// Callback types used by delayed typo correction; presumably invoked to
// emit the typo diagnostic and to attempt recovery, respectively (names
// match the DiagHandler/RecoveryHandler members below — TODO confirm
// against the definitions).
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
    TypoRecoveryCallback;

private:
bool CppLookupName(LookupResult &R, Scope *S);

// Per-TypoExpr state tracked while its correction is delayed. Declaring the
// move operations suppresses the implicit copies, so this type is move-only
// (required by the owning unique_ptr member).
struct TypoExprState {
  std::unique_ptr<TypoCorrectionConsumer> Consumer;
  TypoDiagnosticGenerator DiagHandler;
  TypoRecoveryCallback RecoveryHandler;
  TypoExprState();
  TypoExprState(TypoExprState &&other) noexcept;
  TypoExprState &operator=(TypoExprState &&other) noexcept;
};

/// \brief The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// \brief Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC); // \brief The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// \brief Whether we have already loaded known namespaces from an extenal /// source. bool LoadedExternalKnownNamespaces; /// \brief Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// \brief Clears the state of the given TypoExpr. void clearDelayedTypo(TypoExpr *TE); /// \brief Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloaded. 
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate); bool isKnownName(StringRef name); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void 
LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. }; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// \brief Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. 
ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. 
// Attribute processing and validation entry points.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);

// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D, const AttributeList *AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL,
                              bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
                                    const AttributeList *AttrList);

void checkUnusedDeclAttributes(Declarator &D);

/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);

bool CheckRegparmAttr(const AttributeList &attr, unsigned &value);
bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC,
                          const FunctionDecl *FD = nullptr);
bool CheckNoReturnAttr(const AttributeList &attr);
bool CheckNoCallerSavedRegsAttr(const AttributeList &attr);
bool checkStringLiteralArgumentAttr(const AttributeList &Attr, unsigned ArgNum,
                                    StringRef &Str,
                                    SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
void checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
    CXXRecordDecl *RD, SourceRange Range, bool BestCase,
    MSInheritanceAttr::Spelling SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);

/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
                            SourceLocation Loc);

// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType &T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Check whether a nullability type specifier can be added to the given /// type. /// /// \param type The type to which the nullability specifier will be /// added. On success, this type will be updated appropriately. /// /// \param nullability The nullability specifier to add. /// /// \param nullabilityLoc The location of the nullability specifier. /// /// \param isContextSensitive Whether this nullability specifier was /// written as a context-sensitive keyword (in an Objective-C /// method) or an Objective-C property attribute, rather than as an /// underscored type specifier. /// /// \param allowArrayTypes Whether to accept nullability specifiers on an /// array type (e.g., because it will decay to a pointer). /// /// \returns true if nullability cannot be applied, false otherwise. bool checkNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability, SourceLocation nullabilityLoc, bool isContextSensitive, bool allowArrayTypes); /// \brief Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, AttributeList *Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if method /// implementation declaration matches exactly that of its declaration. 
void WarnExactTypedMethods(ObjCMethodDecl *Method,
                           ObjCMethodDecl *MethodDecl,
                           bool IsProtocolMethodDecl);

// Set of selectors, used when walking method lists below.
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;

/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
                              ObjCIvarDecl **Fields, unsigned nIvars,
                              SourceLocation Loc);

/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
                               ObjCContainerDecl* IDecl,
                               bool IncompleteImpl = false);

/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
                                     ObjCContainerDecl *CDecl,
                                     bool SynthesizeProperties);

/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);

/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
                                 ObjCInterfaceDecl *IDecl,
                                 SourceLocation AtEnd);
// Convenience overload taking the raw declaration.
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);

/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
                                    ObjCMethodDecl *Method, ObjCIvarDecl *IV);

/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which
/// backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
                                         const ObjCImplementationDecl *ImplD);

/// GetIvarBackingPropertyAccessor - If method is a property setter/getter and
/// its property has a backing ivar, returns this ivar; otherwise, returns NULL.
/// It also returns ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
                                             const ObjCPropertyDecl *&PDecl) const;

/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
                      SourceLocation AtLoc,
                      SourceLocation LParenLoc,
                      FieldDeclarator &FD,
                      Selector GetterSel,
                      SourceLocation GetterNameLoc,
                      Selector SetterSel,
                      SourceLocation SetterNameLoc,
                      const bool isReadWrite,
                      unsigned &Attributes,
                      const unsigned AttributesAsWritten,
                      QualType T,
                      TypeSourceInfo *TSI,
                      tok::ObjCKeywordKind MethodImplKind);

/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
                                     ObjCContainerDecl *CDecl,
                                     SourceLocation AtLoc,
                                     SourceLocation LParenLoc,
                                     FieldDeclarator &FD,
                                     Selector GetterSel,
                                     SourceLocation GetterNameLoc,
                                     Selector SetterSel,
                                     SourceLocation SetterNameLoc,
                                     const bool isReadWrite,
                                     const unsigned Attributes,
                                     const unsigned AttributesAsWritten,
                                     QualType T,
                                     TypeSourceInfo *TSI,
                                     tok::ObjCKeywordKind MethodImplKind,
                                     DeclContext *lexicalDC = nullptr);

/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
                                     ObjCInterfaceDecl* IDecl);

void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

void DiagnoseMissingDesignatedInitOverrides(
                                          const ObjCImplementationDecl *ImplD,
                                          const ObjCInterfaceDecl *IFD);

void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

// How strictly two method types are compared by the matching routines below.
enum MethodMatchStrategy {
  MMS_loose,
  MMS_strict
};

/// MatchTwoMethodDeclarations - Checks if two methods' type match and returns
/// true, or false, accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
                                const ObjCMethodDecl *PrevMethod,
                                MethodMatchStrategy strategy = MMS_strict);

/// MatchAllMethodDeclarations - Check methods declared in interface
/// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
                                const SelectorSet &ClsMap,
                                SelectorSet &InsMapSeen,
                                SelectorSet &ClsMapSeen,
                                ObjCImplDecl* IMPDecl,
                                ObjCContainerDecl* IDecl,
                                bool &IncompleteImpl,
                                bool ImmediateClass,
                                bool WarnCategoryMethodImpl=false);

/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// category matches with those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

/// \brief Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                         bool receiverIdOrClass,
                                         bool instance);

public:
/// \brief - Returns instance or factory methods in global method pool for
/// given selector. It checks the desired kind first, if none is found, and
/// parameter checkTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, function returns false; otherwise, it
/// returns true.
bool CollectMultipleMethodsInGlobalPool(Selector Sel,
                                        SmallVectorImpl<ObjCMethodDecl*>& Methods,
                                        bool InstanceFirst, bool CheckTheOther,
                                        const ObjCObjectType *TypeBound = nullptr);

// Report whether more than one method in the global pool matches; used to
// decide if a "multiple methods found" diagnostic is needed.
bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                                    SourceRange R, bool receiverIdOrClass,
                                    SmallVectorImpl<ObjCMethodDecl*>& Methods);

// Emit the diagnostics for multiple pool methods with the same selector.
void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
                                        Selector Sel, SourceRange R,
                                        bool receiverIdOrClass);

private:
/// \brief - Returns a selector which best matches given argument list or
/// nullptr if none could be found
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
                                 bool IsInstance,
                                 SmallVectorImpl<ObjCMethodDecl*>& Methods);

/// \brief Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                bool RecordFailure = true) {
  if (RecordFailure)
    TypoCorrectionFailures[Typo].insert(TypoLoc);
  return TypoCorrection();
}

public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/true);
}

/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/false);
}

/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);

/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                 bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/true);
}

/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/false);
}

const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
                              QualType ObjectType=QualType());

/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                       SmallVectorImpl<ObjCIvarDecl*> &Ivars);

//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
// Wrapper for a fully-analyzed expression argument; only Sema::MakeFullExpr
// can build a non-empty one (see the friend declaration below).
class FullExprArg {
public:
  FullExprArg() : E(nullptr) { }
  FullExprArg(Sema &actions) : E(nullptr) { }

  ExprResult release() {
    return E;
  }

  Expr *get() const { return E; }

  Expr *operator->() {
    return E;
  }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;

  explicit FullExprArg(Expr *expr) : E(expr) {}

  Expr *E;
};

// Wrap Arg as a full expression, using Arg's own location when available.
FullExprArg MakeFullExpr(Expr *Arg) {
  return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  return FullExprArg(ActOnFinishFullExpr(Arg, CC).get());
}
// Same, but the expression's value is discarded (e.g. expression statements).
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  ExprResult FE =
    ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                        /*DiscardedValue*/ true);
  return FullExprArg(FE.get());
}

StmtResult ActOnExprStmt(ExprResult Arg);
StmtResult ActOnExprStmtError();

StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                         bool HasLeadingEmptyMacro = false);

void ActOnStartOfCompoundStmt();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                             ArrayRef<Stmt *> Elts, bool isStmtExpr);

/// \brief A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
  CompoundScopeRAII(Sema &S): S(S) {
    S.ActOnStartOfCompoundStmt();
  }

  ~CompoundScopeRAII() {
    S.ActOnFinishOfCompoundStmt();
  }

private:
  Sema &S;
};

/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
  Sema &S;
  bool Active;
  FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
  ~FunctionScopeRAII() {
    if (Active)
      S.PopFunctionScopeInfo();
  }
  void disable() { Active = false; }
};

StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
                         SourceLocation StartLoc,
                         SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal,
                         SourceLocation DotDotDotLoc, Expr *RHSVal,
                         SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);

StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
                            SourceLocation ColonLoc,
                            Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
                          SourceLocation ColonLoc, Stmt *SubStmt);

StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
                               ArrayRef<const Attr*> Attrs,
                               Stmt *SubStmt);

class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
                       Stmt *InitStmt,
                       ConditionResult Cond, Stmt *ThenVal,
                       SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
                       Stmt *InitStmt,
                       ConditionResult Cond, Stmt *ThenVal,
                       SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
                                  Stmt *InitStmt,
                                  ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
                                 Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
                          Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
                       SourceLocation WhileLoc, SourceLocation CondLParen,
                       Expr *Cond, SourceLocation CondRParen);

StmtResult ActOnForStmt(SourceLocation ForLoc,
                        SourceLocation LParenLoc,
                        Stmt *First,
                        ConditionResult Second,
                        FullExprArg Third,
                        SourceLocation RParenLoc,
                        Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
                                         Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
                                      Stmt *First, Expr *collection,
                                      SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);

enum BuildForRangeKind {
  /// Initial building of a for-range statement.
  BFRK_Build,
  /// Instantiation or recovery rebuild of a for-range statement. Don't
  /// attempt any typo-correction.
  BFRK_Rebuild,
  /// Determining whether a for-range statement could be built. Avoid any
  /// unnecessary or irreversible actions.
  BFRK_Check
};

StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
                                SourceLocation CoawaitLoc,
                                Stmt *LoopVar,
                                SourceLocation ColonLoc, Expr *Collection,
                                SourceLocation RParenLoc,
                                BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
                                SourceLocation CoawaitLoc,
                                SourceLocation ColonLoc,
                                Stmt *RangeDecl, Stmt *Begin, Stmt *End,
                                Expr *Cond, Expr *Inc,
                                Stmt *LoopVarDecl,
                                SourceLocation RParenLoc,
                                BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);

StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
                         SourceLocation LabelLoc,
                         LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
                                 SourceLocation StarLoc,
                                 Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);

// Captured-region (e.g. OpenMP outlining) support.
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
                              CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
                              CapturedRegionKind Kind,
                              ArrayRef<CapturedParamNameType> Params);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
                                         SourceLocation Loc,
                                         unsigned NumParams);

// Copy-elision (NRVO) candidate analysis for return statements.
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
                                 bool AllowParamOrMoveConstructible);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
                            bool AllowParamOrMoveConstructible);

StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
                           Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);

// GCC-style inline assembly.
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
                           bool IsVolatile, unsigned NumOutputs,
                           unsigned NumInputs, IdentifierInfo **Names,
                           MultiExprArg Constraints, MultiExprArg Exprs,
                           Expr *AsmString, MultiExprArg Clobbers,
                           SourceLocation RParenLoc);

// MS-style inline assembly.
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
                                     SourceLocation TemplateKWLoc,
                                     UnqualifiedId &Id,
                                     llvm::InlineAsmIdentifierInfo &Info,
                                     bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
                          unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
                                       llvm::InlineAsmIdentifierInfo &Info,
                                       SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
                          ArrayRef<Token> AsmToks,
                          StringRef AsmString,
                          unsigned NumOutputs, unsigned NumInputs,
                          ArrayRef<StringRef> Constraints,
                          ArrayRef<StringRef> Clobbers,
                          ArrayRef<Expr*> Exprs,
                          SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
                                 SourceLocation Location,
                                 bool AlwaysCreate);

// Objective-C \@try / \@catch / \@finally / \@throw / \@synchronized.
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
                                SourceLocation StartLoc,
                                SourceLocation IdLoc,
                                IdentifierInfo *Id,
                                bool Invalid = false);

Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);

StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc,
                                SourceLocation RParen, Decl *Parm,
                                Stmt *Body);

StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);

StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
                              MultiStmtArg Catch, Stmt *Finally);

StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
                                Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
                                          Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
                                       Expr *SynchExpr,
                                       Stmt *SynchBody);

StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);

// C++ exception handling.
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
                                   SourceLocation StartLoc,
                                   SourceLocation IdLoc,
                                   IdentifierInfo *Id);

Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);

StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
                              Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
                            ArrayRef<Stmt *> Handlers);

// Structured exception handling (__try / __except / __finally / __leave).
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
                            SourceLocation TryLoc, Stmt *TryBlock,
                            Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
                               Expr *FilterExpr,
                               Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);

void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);

bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;

/// \brief If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);

/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);

/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
///   if (condition);
///     do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
                           const Stmt *Body,
                           unsigned DiagID);

/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
                           const Stmt *PossibleBody);

/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
                      SourceLocation OpLoc);

/// \brief Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
                                         SourceLocation Loc);

/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);

// Delayed-diagnostic bookkeeping for declarations being parsed: diagnostics
// are pushed into a pool and emitted (or dropped) when the declaration pops.
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
  return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);

typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
  return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
  DelayedDiagnostics.popUndelayed(state);
}

void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);

// Availability checking for a referenced declaration.
void DiagnoseAvailabilityOfDecl(NamedDecl *D, SourceLocation Loc,
                                const ObjCInterfaceDecl *UnknownObjCClass,
                                bool ObjCPropertyAccess,
                                bool AvoidPartialAvailabilityChecks = false);

bool makeUnavailableInSystemHeader(SourceLocation loc,
                                   UnavailableAttr::ImplicitReason reason);

/// \brief Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);

//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
// Diagnose a use of D at Loc (deleted, unavailable, deprecated, ...).
bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
                       const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
                       bool ObjCPropertyAccess = false,
                       bool AvoidPartialAvailabilityChecks = false);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
                                      ObjCMethodDecl *Getter,
                                      SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
                           ArrayRef<Expr *> Args);

// Expression evaluation context stack (potentially-evaluated, unevaluated,
// decltype operand, ...).
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
                                     Decl *LambdaContextDecl = nullptr,
                                     bool IsDecltype = false);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
                                     ReuseLambdaContextDecl_t,
                                     bool IsDecltype = false);
void PopExpressionEvaluationContext();

void DiscardCleanupsInEvaluationContext();

ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);

ExprResult ActOnConstantExpression(ExprResult Res);

// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
                            bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);

void UpdateMarkingForLValueToRValue(Expr *E);
void CleanupVarDeclMarking();

// How a variable is being captured by a block or lambda.
enum TryCaptureKind {
  TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};

/// \brief Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely check whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
                        SourceLocation EllipsisLoc, bool BuildAndDiagnose,
                        QualType &CaptureType,
                        QualType &DeclRefType,
                        const unsigned *const FunctionScopeIndexToStopAt);

/// \brief Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
                        TryCaptureKind Kind = TryCapture_Implicit,
                        SourceLocation EllipsisLoc = SourceLocation());

/// \brief Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);

/// \brief Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);

/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
                                      bool SkipLocalVariables = false);

/// \brief Try to recover by turning the given expression into a
/// call.  Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
                          bool ForceComplain = false,
                          bool (*IsPlausibleResult)(QualType) = nullptr);

/// \brief Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
                   UnresolvedSetImpl &NonTemplateOverloads);

/// \brief Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
                         const PartialDiagnostic &PD);

// Primary Expressions.
SourceRange getExprRange(Expr *E) const;

ExprResult ActOnIdExpression(
    Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
    UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
    std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr,
    bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);

void DecomposeUnqualifiedId(const UnqualifiedId &Id,
                            TemplateArgumentListInfo &Buffer,
                            DeclarationNameInfo &NameInfo,
                            const TemplateArgumentListInfo *&TemplateArgs);

// Diagnose a lookup that found nothing, possibly typo-correcting.
bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
                         std::unique_ptr<CorrectionCandidateCallback> CCC,
                         TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
                         ArrayRef<Expr *> Args = None,
                         TypoExpr **Out = nullptr);

ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
                              IdentifierInfo *II,
                              bool AllowBuiltinCreation=false);

ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
                                      SourceLocation TemplateKWLoc,
                                      const DeclarationNameInfo &NameInfo,
                                      bool isAddressOfOperand,
                                const TemplateArgumentListInfo *TemplateArgs);

ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty,
                            ExprValueKind VK,
                            SourceLocation Loc,
                            const CXXScopeSpec *SS = nullptr);
ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
                            const DeclarationNameInfo &NameInfo,
                            const CXXScopeSpec *SS = nullptr,
                            NamedDecl *FoundD = nullptr,
                            const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
    const CXXScopeSpec &SS,
    SourceLocation nameLoc,
    IndirectFieldDecl *indirectField,
    DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
    Expr *baseObjectExpr = nullptr,
    SourceLocation opLoc = SourceLocation());

ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
                                           SourceLocation TemplateKWLoc,
                                           LookupResult &R,
                                const TemplateArgumentListInfo *TemplateArgs,
                                           const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
                                   SourceLocation TemplateKWLoc,
                                   LookupResult &R,
                                const TemplateArgumentListInfo *TemplateArgs,
                                   bool IsDefiniteInstance,
                                   const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
                                const LookupResult &R,
                                bool HasTrailingLParen);

ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
                                  const DeclarationNameInfo &NameInfo,
                                  bool IsAddressOfOperand, const Scope *S,
                                  TypeSourceInfo **RecoveryTSI = nullptr);

ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
                                     SourceLocation TemplateKWLoc,
                                     const DeclarationNameInfo &NameInfo,
                                const TemplateArgumentListInfo *TemplateArgs);

ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
                                    LookupResult &R,
                                    bool NeedsADL,
                                    bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
    const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
    NamedDecl *FoundD = nullptr,
    const TemplateArgumentListInfo *TemplateArgs = nullptr,
    bool AcceptInvalidDecl = false);

ExprResult BuildLiteralOperatorCall(LookupResult &R,
                      DeclarationNameInfo &SuffixInfo,
                      ArrayRef<Expr *> Args,
                      SourceLocation LitEndLoc,
                      TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);

ExprResult BuildPredefinedExpr(SourceLocation Loc,
                               PredefinedExpr::IdentType IT);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);

bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);

ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
                                  Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
                              SourceLocation R,
                              MultiExprArg Val);

/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
                              Scope *UDLScope = nullptr);

ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
                                     SourceLocation DefaultLoc,
                                     SourceLocation RParenLoc,
                                     Expr *ControllingExpr,
                                     ArrayRef<ParsedType> ArgTypes,
                                     ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
                                      SourceLocation DefaultLoc,
                                      SourceLocation RParenLoc,
                                      Expr *ControllingExpr,
                                      ArrayRef<TypeSourceInfo *> Types,
                                      ArrayRef<Expr *> Exprs);

// Binary/Unary Operators.  'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
                                Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
                        UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
                        tok::TokenKind Op, Expr *Input);

QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);

ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
                                          SourceLocation OpLoc,
                                          UnaryExprOrTypeTrait ExprKind,
                                          SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
                                          UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
                              UnaryExprOrTypeTrait ExprKind,
                              bool IsType, void *TyOrEx,
                              SourceRange ArgRange);

ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);

bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
                                      SourceRange ExprRange,
                                      UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
                                        SourceLocation OpLoc,
                                        IdentifierInfo &Name,
                                        SourceLocation NameLoc,
                                        SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
                               tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
                                   Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
                                           Expr *Idx, SourceLocation RLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
                                    Expr *LowerBound, SourceLocation ColonLoc,
                                    Expr *Length, SourceLocation RBLoc);

// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
  Scope *S;
  UnqualifiedId &Id;
  Decl *ObjCImpDecl;
};

ExprResult BuildMemberReferenceExpr(
    Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
    CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
    NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
    const TemplateArgumentListInfo *TemplateArgs, const Scope *S,
    ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);

ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
                         bool IsArrow, const CXXScopeSpec &SS,
                         SourceLocation TemplateKWLoc,
                         NamedDecl *FirstQualifierInScope, LookupResult &R,
                         const TemplateArgumentListInfo *TemplateArgs,
                         const Scope *S,
                         bool SuppressQualifierCheck = false,
                         ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);

ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
                                   SourceLocation OpLoc,
                                   const CXXScopeSpec &SS, FieldDecl *Field,
                                   DeclAccessPair FoundDecl,
                                   const DeclarationNameInfo &MemberNameInfo);

ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);

bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
                                   const CXXScopeSpec &SS,
                                   const LookupResult &R);

ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
                                    bool IsArrow, SourceLocation OpLoc,
                                    const CXXScopeSpec &SS,
                                    SourceLocation TemplateKWLoc,
                                    NamedDecl *FirstQualifierInScope,
                                    const DeclarationNameInfo &NameInfo,
                                const TemplateArgumentListInfo *TemplateArgs);

ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
                                 SourceLocation OpLoc,
                                 tok::TokenKind OpKind,
                                 CXXScopeSpec &SS,
                                 SourceLocation TemplateKWLoc,
                                 UnqualifiedId &Member,
                                 Decl *ObjCImpDecl);

void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
                             FunctionDecl *FDecl,
                             const FunctionProtoType *Proto,
                             ArrayRef<Expr *> Args,
                             SourceLocation RParenLoc,
                             bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
                              ParmVarDecl *Param,
                              const Expr *ArgExpr);

/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                         MultiExprArg ArgExprs, SourceLocation RParenLoc,
                         Expr *ExecConfig = nullptr,
                         bool IsExecConfig = false);
ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
                                 SourceLocation LParenLoc,
                                 ArrayRef<Expr *> Arg,
                                 SourceLocation RParenLoc,
                                 Expr *Config = nullptr,
                                 bool IsExecConfig = false);

// CUDA kernel launch configuration (<<<...>>>).
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                   MultiExprArg ExecConfig,
                                   SourceLocation GGGLoc);

ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
                         Declarator &D, ParsedType &Ty,
                         SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
                               TypeSourceInfo *Ty,
                               SourceLocation RParenLoc,
                               Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);

/// \brief Build an altivec or OpenCL literal.
  ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
                                SourceLocation RParenLoc, Expr *E,
                                TypeSourceInfo *TInfo);

  ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);

  /// Act on a C99 compound literal: (Ty){ InitExpr }.
  ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
                                  ParsedType Ty,
                                  SourceLocation RParenLoc,
                                  Expr *InitExpr);

  ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
                                      TypeSourceInfo *TInfo,
                                      SourceLocation RParenLoc,
                                      Expr *LiteralExpr);

  ExprResult ActOnInitList(SourceLocation LBraceLoc,
                           MultiExprArg InitArgList,
                           SourceLocation RBraceLoc);

  /// Act on a designated initializer (e.g. ".field = Init"); GNUSyntax marks
  /// the deprecated "field: Init" form.
  ExprResult ActOnDesignatedInitializer(Designation &Desig,
                                        SourceLocation Loc,
                                        bool GNUSyntax,
                                        ExprResult Init);

private:
  static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);

public:
  ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
                        tok::TokenKind Kind,
                        Expr *LHSExpr, Expr *RHSExpr);
  ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
                        BinaryOperatorKind Opc,
                        Expr *LHSExpr, Expr *RHSExpr);
  ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                                Expr *LHSExpr, Expr *RHSExpr);
  void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);

  /// ActOnConditionalOp - Parse a ?: operation.  Note that 'LHS' may be null
  /// in the case of the GNU conditional expr extension.
  ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
                                SourceLocation ColonLoc,
                                Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);

  /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
  ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
                            LabelDecl *TheDecl);

  void ActOnStartStmtExpr();
  ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
                           SourceLocation RPLoc); // "({..})"
  void ActOnStmtExprError();

  // __builtin_offsetof(type, identifier(.identifier|[expr])*)
  struct OffsetOfComponent {
    SourceLocation LocStart, LocEnd;
    bool isBrackets;  // true if [expr], false if .ident
    union {
      IdentifierInfo *IdentInfo;
      Expr *E;
    } U;
  };

  /// __builtin_offsetof(type, a.b[123][456].c)
  ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
                                  TypeSourceInfo *TInfo,
                                  ArrayRef<OffsetOfComponent> Components,
                                  SourceLocation RParenLoc);
  ExprResult ActOnBuiltinOffsetOf(Scope *S,
                                  SourceLocation BuiltinLoc,
                                  SourceLocation TypeLoc,
                                  ParsedType ParsedArgTy,
                                  ArrayRef<OffsetOfComponent> Components,
                                  SourceLocation RParenLoc);

  // __builtin_choose_expr(constExpr, expr1, expr2)
  ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
                             Expr *CondExpr, Expr *LHSExpr,
                             Expr *RHSExpr, SourceLocation RPLoc);

  // __builtin_va_arg(expr, type)
  ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
                        SourceLocation RPLoc);
  ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
                            TypeSourceInfo *TInfo, SourceLocation RPLoc);

  // __null
  ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);

  bool CheckCaseExpression(Expr *E);

  /// \brief Describes the result of an "if-exists" condition check.
  enum IfExistsResult {
    /// \brief The symbol exists.
    IER_Exists,

    /// \brief The symbol does not exist.
    IER_DoesNotExist,

    /// \brief The name is a dependent name, so the results will differ
    /// from one instantiation to the next.
    IER_Dependent,

    /// \brief An error occurred.
    IER_Error
  };

  IfExistsResult
  CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
                               const DeclarationNameInfo &TargetNameInfo);

  IfExistsResult
  CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
                               bool IsIfExists, CXXScopeSpec &SS,
                               UnqualifiedId &Name);

  StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
                                        bool IsIfExists,
                                        NestedNameSpecifierLoc QualifierLoc,
                                        DeclarationNameInfo NameInfo,
                                        Stmt *Nested);
  StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
                                        bool IsIfExists,
                                        CXXScopeSpec &SS, UnqualifiedId &Name,
                                        Stmt *Nested);

  //===------------------------- "Block" Extension ------------------------===//

  /// ActOnBlockStart - This callback is invoked when a block literal is
  /// started.
  void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);

  /// ActOnBlockArguments - This callback allows processing of block arguments.
  /// If there are no arguments, this is still invoked.
  void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
                           Scope *CurScope);

  /// ActOnBlockError - If there is an error parsing a block, this callback
  /// is invoked to pop the information about the block from the action impl.
  void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);

  /// ActOnBlockStmtExpr - This is called when the body of a block statement
  /// literal was successfully completed.  ^(int x){...}
  ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
                                Scope *CurScope);

  //===---------------------------- Clang Extensions ----------------------===//

  /// __builtin_convertvector(...)
  ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
                                    SourceLocation BuiltinLoc,
                                    SourceLocation RParenLoc);

  //===---------------------------- OpenCL Features -----------------------===//

  /// __builtin_astype(...)
  ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
                             SourceLocation BuiltinLoc,
                             SourceLocation RParenLoc);

  //===---------------------------- C++ Features --------------------------===//

  // Act on C++ namespaces
  Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
                               SourceLocation NamespaceLoc,
                               SourceLocation IdentLoc,
                               IdentifierInfo *Ident,
                               SourceLocation LBrace,
                               AttributeList *AttrList,
                               UsingDirectiveDecl * &UsingDecl);
  void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);

  NamespaceDecl *getStdNamespace() const;
  NamespaceDecl *getOrCreateStdNamespace();

  NamespaceDecl *lookupStdExperimentalNamespace();

  CXXRecordDecl *getStdBadAlloc() const;
  EnumDecl *getStdAlignValT() const;

  /// \brief Tests whether Ty is an instance of std::initializer_list and, if
  /// it is and Element is not NULL, assigns the element type to Element.
  bool isStdInitializerList(QualType Ty, QualType *Element);

  /// \brief Looks for the std::initializer_list template and instantiates it
  /// with Element, or emits an error if it's not found.
  ///
  /// \returns The instantiated template, or null on error.
  QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);

  /// \brief Determine whether Ctor is an initializer-list constructor, as
  /// defined in [dcl.init.list]p2.
  bool isInitListConstructor(const FunctionDecl *Ctor);

  Decl *ActOnUsingDirective(Scope *CurScope,
                            SourceLocation UsingLoc,
                            SourceLocation NamespcLoc,
                            CXXScopeSpec &SS,
                            SourceLocation IdentLoc,
                            IdentifierInfo *NamespcName,
                            AttributeList *AttrList);

  void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);

  Decl *ActOnNamespaceAliasDef(Scope *CurScope,
                               SourceLocation NamespaceLoc,
                               SourceLocation AliasLoc,
                               IdentifierInfo *Alias,
                               CXXScopeSpec &SS,
                               SourceLocation IdentLoc,
                               IdentifierInfo *Ident);

  void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
  bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
                            const LookupResult &PreviousDecls,
                            UsingShadowDecl *&PrevShadow);
  UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
                                        NamedDecl *Target,
                                        UsingShadowDecl *PrevDecl);

  bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
                                   bool HasTypenameKeyword,
                                   const CXXScopeSpec &SS,
                                   SourceLocation NameLoc,
                                   const LookupResult &Previous);
  bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
                               bool HasTypename,
                               const CXXScopeSpec &SS,
                               const DeclarationNameInfo &NameInfo,
                               SourceLocation NameLoc);

  NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
                                   SourceLocation UsingLoc,
                                   bool HasTypenameKeyword,
                                   SourceLocation TypenameLoc,
                                   CXXScopeSpec &SS,
                                   DeclarationNameInfo NameInfo,
                                   SourceLocation EllipsisLoc,
                                   AttributeList *AttrList,
                                   bool IsInstantiation);
  NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
                                ArrayRef<NamedDecl *> Expansions);

  bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);

  /// Given a derived-class using shadow declaration for a constructor and the
  /// corresponding base class constructor, find or create the implicit
  /// synthesized derived class constructor to use for this initialization.
  CXXConstructorDecl *
  findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
                            ConstructorUsingShadowDecl *DerivedShadow);

  Decl *ActOnUsingDeclaration(Scope *CurScope,
                              AccessSpecifier AS,
                              SourceLocation UsingLoc,
                              SourceLocation TypenameLoc,
                              CXXScopeSpec &SS,
                              UnqualifiedId &Name,
                              SourceLocation EllipsisLoc,
                              AttributeList *AttrList);
  Decl *ActOnAliasDeclaration(Scope *CurScope,
                              AccessSpecifier AS,
                              MultiTemplateParamsArg TemplateParams,
                              SourceLocation UsingLoc,
                              UnqualifiedId &Name,
                              AttributeList *AttrList,
                              TypeResult Type,
                              Decl *DeclFromDeclSpec);

  /// BuildCXXConstructExpr - Creates a complete call to a constructor,
  /// including handling of its default argument expressions.
  ///
  /// \param ConstructKind - a CXXConstructExpr::ConstructionKind
  ExprResult
  BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
                        NamedDecl *FoundDecl,
                        CXXConstructorDecl *Constructor, MultiExprArg Exprs,
                        bool HadMultipleCandidates, bool IsListInitialization,
                        bool IsStdInitListInitialization,
                        bool RequiresZeroInit, unsigned ConstructKind,
                        SourceRange ParenRange);

  /// Build a CXXConstructExpr whose constructor has already been resolved if
  /// it denotes an inherited constructor.
  ExprResult
  BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
                        CXXConstructorDecl *Constructor, bool Elidable,
                        MultiExprArg Exprs,
                        bool HadMultipleCandidates, bool IsListInitialization,
                        bool IsStdInitListInitialization,
                        bool RequiresZeroInit, unsigned ConstructKind,
                        SourceRange ParenRange);

  // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
  // the constructor can be elidable?
  ExprResult
  BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
                        NamedDecl *FoundDecl,
                        CXXConstructorDecl *Constructor, bool Elidable,
                        MultiExprArg Exprs,
                        bool HadMultipleCandidates, bool IsListInitialization,
                        bool IsStdInitListInitialization,
                        bool RequiresZeroInit, unsigned ConstructKind,
                        SourceRange ParenRange);

  ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);

  /// Instantiate or parse a C++ default argument expression as necessary.
  /// Return true on error.
  bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
                              ParmVarDecl *Param);

  /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
  /// the default expr if needed.
  ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
                                    FunctionDecl *FD,
                                    ParmVarDecl *Param);

  /// FinalizeVarWithDestructor - Prepare for calling destructor on the
  /// constructed variable.
  void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);

  /// \brief Helper class that collects exception specifications for
  /// implicitly-declared special member functions.
  class ImplicitExceptionSpecification {
    // Pointer to allow copying
    Sema *Self;
    // We order exception specifications thus:
    // noexcept is the most restrictive, but is only used in C++11.
    // throw() comes next.
    // Then a throw(collected exceptions)
    // Finally no specification, which is expressed as noexcept(false).
    // throw(...) is used instead if any called function uses it.
    ExceptionSpecificationType ComputedEST;
    llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
    SmallVector<QualType, 4> Exceptions;

    void ClearExceptions() {
      ExceptionsSeen.clear();
      Exceptions.clear();
    }

  public:
    explicit ImplicitExceptionSpecification(Sema &Self)
      : Self(&Self), ComputedEST(EST_BasicNoexcept) {
      // Before C++11 there is no noexcept, so start from throw().
      if (!Self.getLangOpts().CPlusPlus11)
        ComputedEST = EST_DynamicNone;
    }

    /// \brief Get the computed exception specification type.
    ExceptionSpecificationType getExceptionSpecType() const {
      assert(ComputedEST != EST_ComputedNoexcept &&
             "noexcept(expr) should not be a possible result");
      return ComputedEST;
    }

    /// \brief The number of exceptions in the exception specification.
    unsigned size() const { return Exceptions.size(); }

    /// \brief The set of exceptions in the exception specification.
    const QualType *data() const { return Exceptions.data(); }

    /// \brief Integrate another called method into the collected data.
    void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

    /// \brief Integrate an invoked expression into the collected data.
    void CalledExpr(Expr *E);

    /// \brief Overwrite an EPI's exception specification with this
    /// computed exception specification.
    FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
      FunctionProtoType::ExceptionSpecInfo ESI;
      ESI.Type = getExceptionSpecType();
      if (ESI.Type == EST_Dynamic) {
        ESI.Exceptions = Exceptions;
      } else if (ESI.Type == EST_None) {
        /// C++11 [except.spec]p14:
        ///   The exception-specification is noexcept(false) if the set of
        ///   potential exceptions of the special member function contains "any"
        ESI.Type = EST_ComputedNoexcept;
        ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
                                                     tok::kw_false).get();
      }
      return ESI;
    }
  };

  /// \brief Determine what sort of exception specification a defaulted
  /// default constructor of a class will have.
  ImplicitExceptionSpecification
  ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
                                           CXXMethodDecl *MD);

  /// \brief Determine what sort of exception specification a defaulted
  /// copy constructor of a class will have, and whether the parameter
  /// will be const.
  ImplicitExceptionSpecification
  ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);

  /// \brief Determine what sort of exception specification a defaulted
  /// copy assignment operator of a class will have, and whether the
  /// parameter will be const.
  ImplicitExceptionSpecification
  ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);

  /// \brief Determine what sort of exception specification a defaulted move
  /// constructor of a class will have.
  ImplicitExceptionSpecification
  ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);

  /// \brief Determine what sort of exception specification a defaulted move
  /// assignment operator of a class will have.
  ImplicitExceptionSpecification
  ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);

  /// \brief Determine what sort of exception specification a defaulted
  /// destructor of a class will have.
  ImplicitExceptionSpecification
  ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);

  /// \brief Determine what sort of exception specification an inheriting
  /// constructor of a class will have.
  ImplicitExceptionSpecification
  ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
                                     CXXConstructorDecl *CD);

  /// \brief Evaluate the implicit exception specification for a defaulted
  /// special member function.
  void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);

  /// \brief Check the given exception-specification and update the
  /// exception specification information with the results.
  void checkExceptionSpecification(bool IsTopLevel,
                                   ExceptionSpecificationType EST,
                                   ArrayRef<ParsedType> DynamicExceptions,
                                   ArrayRef<SourceRange> DynamicExceptionRanges,
                                   Expr *NoexceptExpr,
                                   SmallVectorImpl<QualType> &Exceptions,
                                   FunctionProtoType::ExceptionSpecInfo &ESI);

  /// \brief Determine if we're in a case where we need to (incorrectly) eagerly
  /// parse an exception specification to work around a libstdc++ bug.
  bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);

  /// \brief Add an exception-specification to the given member function
  /// (or member function template). The exception-specification was parsed
  /// after the method itself was declared.
  void actOnDelayedExceptionSpecification(Decl *Method,
         ExceptionSpecificationType EST,
         SourceRange SpecificationRange,
         ArrayRef<ParsedType> DynamicExceptions,
         ArrayRef<SourceRange> DynamicExceptionRanges,
         Expr *NoexceptExpr);

  class InheritedConstructorInfo;

  /// \brief Determine if a special member function should have a deleted
  /// definition when it is defaulted.
  bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
                                 InheritedConstructorInfo *ICI = nullptr,
                                 bool Diagnose = false);

  /// \brief Declare the implicit default constructor for the given class.
  ///
  /// \param ClassDecl The class declaration into which the implicit
  /// default constructor will be added.
  ///
  /// \returns The implicitly-declared default constructor.
  CXXConstructorDecl *DeclareImplicitDefaultConstructor(
                                                     CXXRecordDecl *ClassDecl);

  /// DefineImplicitDefaultConstructor - Checks for feasibility of
  /// defining this constructor as the default constructor.
  void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
                                        CXXConstructorDecl *Constructor);

  /// \brief Declare the implicit destructor for the given class.
  ///
  /// \param ClassDecl The class declaration into which the implicit
  /// destructor will be added.
  ///
  /// \returns The implicitly-declared destructor.
  CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);

  /// DefineImplicitDestructor - Checks for feasibility of
  /// defining this destructor as the default destructor.
  void DefineImplicitDestructor(SourceLocation CurrentLocation,
                                CXXDestructorDecl *Destructor);

  /// \brief Build an exception spec for destructors that don't have one.
  ///
  /// C++11 says that user-defined destructors with no exception spec get one
  /// that looks as if the destructor was implicitly declared.
  void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl,
                                     CXXDestructorDecl *Destructor);

  /// \brief Define the specified inheriting constructor.
  void DefineInheritingConstructor(SourceLocation UseLoc,
                                   CXXConstructorDecl *Constructor);

  /// \brief Declare the implicit copy constructor for the given class.
  ///
  /// \param ClassDecl The class declaration into which the implicit
  /// copy constructor will be added.
  ///
  /// \returns The implicitly-declared copy constructor.
  CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);

  /// DefineImplicitCopyConstructor - Checks for feasibility of
  /// defining this constructor as the copy constructor.
  void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
                                     CXXConstructorDecl *Constructor);

  /// \brief Declare the implicit move constructor for the given class.
  ///
  /// \param ClassDecl The Class declaration into which the implicit
  /// move constructor will be added.
  ///
  /// \returns The implicitly-declared move constructor, or NULL if it wasn't
  /// declared.
  CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);

  /// DefineImplicitMoveConstructor - Checks for feasibility of
  /// defining this constructor as the move constructor.
  void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
                                     CXXConstructorDecl *Constructor);

  /// \brief Declare the implicit copy assignment operator for the given class.
  ///
  /// \param ClassDecl The class declaration into which the implicit
  /// copy assignment operator will be added.
  ///
  /// \returns The implicitly-declared copy assignment operator.
  CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);

  /// \brief Defines an implicitly-declared copy assignment operator.
  void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
                                    CXXMethodDecl *MethodDecl);

  /// \brief Declare the implicit move assignment operator for the given class.
  ///
  /// \param ClassDecl The Class declaration into which the implicit
  /// move assignment operator will be added.
  ///
  /// \returns The implicitly-declared move assignment operator, or NULL if it
  /// wasn't declared.
  CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);

  /// \brief Defines an implicitly-declared move assignment operator.
  void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
                                    CXXMethodDecl *MethodDecl);

  /// \brief Force the declaration of any implicitly-declared members of this
  /// class.
  void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);

  /// \brief Check a completed declaration of an implicit special member.
  void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);

  /// \brief Determine whether the given function is an implicitly-deleted
  /// special member function.
  bool isImplicitlyDeleted(FunctionDecl *FD);

  /// \brief Check whether 'this' shows up in the type of a static member
  /// function after the (naturally empty) cv-qualifier-seq would be.
  ///
  /// \returns true if an error occurred.
  bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);

  /// \brief Whether 'this' shows up in the exception specification of a static
  /// member function.
  bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);

  /// \brief Check whether 'this' shows up in the attributes of the given
  /// static member function.
  ///
  /// \returns true if an error occurred.
  bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);

  /// MaybeBindToTemporary - If the passed in expression has a record type with
  /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
  /// it simply returns the passed in expression.
  ExprResult MaybeBindToTemporary(Expr *E);

  /// Check and convert the arguments of a constructor call; returns true on
  /// error, placing the converted arguments in \p ConvertedArgs otherwise.
  bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
                               MultiExprArg ArgsPtr,
                               SourceLocation Loc,
                               SmallVectorImpl<Expr*> &ConvertedArgs,
                               bool AllowExplicit = false,
                               bool IsListInitialization = false);

  ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
                                          SourceLocation NameLoc,
                                          IdentifierInfo &Name);

  ParsedType getDestructorName(SourceLocation TildeLoc,
                               IdentifierInfo &II, SourceLocation NameLoc,
                               Scope *S, CXXScopeSpec &SS,
                               ParsedType ObjectType,
                               bool EnteringContext);

  ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
                                          ParsedType ObjectType);

  // Checks that reinterpret casts don't have undefined behavior.
  void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
                                      bool IsDereference, SourceRange Range);

  /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
  ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
                               tok::TokenKind Kind,
                               SourceLocation LAngleBracketLoc,
                               Declarator &D,
                               SourceLocation RAngleBracketLoc,
                               SourceLocation LParenLoc,
                               Expr *E,
                               SourceLocation RParenLoc);

  ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
                               tok::TokenKind Kind,
                               TypeSourceInfo *Ty,
                               Expr *E,
                               SourceRange AngleBrackets,
                               SourceRange Parens);

  ExprResult BuildCXXTypeId(QualType TypeInfoType,
                            SourceLocation TypeidLoc,
                            TypeSourceInfo *Operand,
                            SourceLocation RParenLoc);
  ExprResult BuildCXXTypeId(QualType TypeInfoType,
                            SourceLocation TypeidLoc,
                            Expr *Operand,
                            SourceLocation RParenLoc);

  /// ActOnCXXTypeid - Parse typeid( something ).
  ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
                            SourceLocation LParenLoc, bool isType,
                            void *TyOrExpr,
                            SourceLocation RParenLoc);

  ExprResult BuildCXXUuidof(QualType TypeInfoType,
                            SourceLocation TypeidLoc,
                            TypeSourceInfo *Operand,
                            SourceLocation RParenLoc);
  ExprResult BuildCXXUuidof(QualType TypeInfoType,
                            SourceLocation TypeidLoc,
                            Expr *Operand,
                            SourceLocation RParenLoc);

  /// ActOnCXXUuidof - Parse __uuidof( something ).
  ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
                            SourceLocation LParenLoc, bool isType,
                            void *TyOrExpr,
                            SourceLocation RParenLoc);

  /// \brief Handle a C++1z fold-expression: ( expr op ... op expr ).
  ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                              tok::TokenKind Operator,
                              SourceLocation EllipsisLoc, Expr *RHS,
                              SourceLocation RParenLoc);
  ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
                              BinaryOperatorKind Operator,
                              SourceLocation EllipsisLoc, Expr *RHS,
                              SourceLocation RParenLoc);
  ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
                                   BinaryOperatorKind Operator);

  //// ActOnCXXThis -  Parse 'this' pointer.
  ExprResult ActOnCXXThis(SourceLocation loc);

  /// \brief Try to retrieve the type of the 'this' pointer.
  ///
  /// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
  QualType getCurrentThisType();

  /// \brief When non-NULL, the C++ 'this' expression is allowed despite the
  /// current context not being a non-static member function. In such cases,
  /// this provides the type used for 'this'.
  QualType CXXThisTypeOverride;

  /// \brief RAII object used to temporarily allow the C++ 'this' expression
  /// to be used, with the given qualifiers on the current class type.
  class CXXThisScopeRAII {
    Sema &S;
    QualType OldCXXThisTypeOverride;
    bool Enabled;

  public:
    /// \brief Introduce a new scope where 'this' may be allowed (when enabled),
    /// using the given declaration (which is either a class template or a
    /// class) along with the given qualifiers.
    /// along with the qualifiers placed on '*this'.
    CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals,
                     bool Enabled = true);

    ~CXXThisScopeRAII();
  };

  /// \brief Make sure the value of 'this' is actually available in the current
  /// context, if it is a potentially evaluated context.
  ///
  /// \param Loc The location at which the capture of 'this' occurs.
  ///
  /// \param Explicit Whether 'this' is explicitly captured in a lambda
  /// capture list.
  ///
  /// \param FunctionScopeIndexToStopAt If non-null, it points to the index
  /// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
  /// This is useful when enclosing lambdas must speculatively capture
  /// 'this' that may or may not be used in certain specializations of
  /// a nested generic lambda (depending on whether the name resolves to
  /// a non-static member function or a static function).
  /// \return returns 'true' if failed, 'false' if success.
  bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
      bool BuildAndDiagnose = true,
      const unsigned *const FunctionScopeIndexToStopAt = nullptr,
      bool ByCopy = false);

  /// \brief Determine whether the given type is the type of *this that is used
  /// outside of the body of a member function for a type that is currently
  /// being defined.
  bool isThisOutsideMemberFunctionBody(QualType BaseType);

  /// ActOnCXXBoolLiteral - Parse {true,false} literals.
  ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

  /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
  ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

  /// Act on an Objective-C @available(...) check expression.
  ExprResult
  ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
                                 SourceLocation AtLoc, SourceLocation RParen);

  /// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
  ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);

  //// ActOnCXXThrow -  Parse throw expressions.
  ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
  ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
                           bool IsThrownVarInScope);
  bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);

  /// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
  /// Can be interpreted either as function-style casting ("int(x)")
  /// or class type construction ("ClassType(x,y,z)")
  /// or creation of a value-initialized type ("int()").
  ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
                                       SourceLocation LParenLoc,
                                       MultiExprArg Exprs,
                                       SourceLocation RParenLoc);

  ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
                                       SourceLocation LParenLoc,
                                       MultiExprArg Exprs,
                                       SourceLocation RParenLoc);

  /// ActOnCXXNew - Parsed a C++ 'new' expression.
  ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
                         SourceLocation PlacementLParen,
                         MultiExprArg PlacementArgs,
                         SourceLocation PlacementRParen,
                         SourceRange TypeIdParens, Declarator &D,
                         Expr *Initializer);
  ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
                         SourceLocation PlacementLParen,
                         MultiExprArg PlacementArgs,
                         SourceLocation PlacementRParen,
                         SourceRange TypeIdParens,
                         QualType AllocType,
                         TypeSourceInfo *AllocTypeInfo,
                         Expr *ArraySize,
                         SourceRange DirectInitRange,
                         Expr *Initializer);

  bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
                          SourceRange R);
  /// Find the operator new / operator delete pair to be used for a
  /// new-expression; returns true on error.
  bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
                               bool UseGlobal, QualType AllocType, bool IsArray,
                               bool &PassAlignment, MultiExprArg PlaceArgs,
                               FunctionDecl *&OperatorNew,
                               FunctionDecl *&OperatorDelete);

  void DeclareGlobalNewDelete();
  void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
                                       ArrayRef<QualType> Params);

  bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
                                DeclarationName Name, FunctionDecl* &Operator,
                                bool Diagnose = true);
  FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
                                              bool CanProvideSize,
                                              bool Overaligned,
                                              DeclarationName Name);
  FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
                                                      CXXRecordDecl *RD);

  /// ActOnCXXDelete - Parsed a C++ 'delete' expression
  ExprResult ActOnCXXDelete(SourceLocation StartLoc,
                            bool UseGlobal, bool ArrayForm,
                            Expr *Operand);
  void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
                            bool IsDelete, bool CallCanBeVirtual,
                            bool WarnOnNonAbstractTypes,
                            SourceLocation DtorLoc);

  ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
                               Expr *Operand, SourceLocation RParen);
  ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
                                  SourceLocation RParen);

  /// \brief Parsed one of the type trait support pseudo-functions.
  ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
                            ArrayRef<ParsedType> Args,
                            SourceLocation RParenLoc);
  ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
                            ArrayRef<TypeSourceInfo *> Args,
                            SourceLocation RParenLoc);

  /// ActOnArrayTypeTrait - Parsed one of the binary type trait support
  /// pseudo-functions.
  ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
                                 SourceLocation KWLoc,
                                 ParsedType LhsTy,
                                 Expr *DimExpr,
                                 SourceLocation RParen);

  ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
                                 SourceLocation KWLoc,
                                 TypeSourceInfo *TSInfo,
                                 Expr *DimExpr,
                                 SourceLocation RParen);

  /// ActOnExpressionTrait - Parsed one of the unary type trait support
  /// pseudo-functions.
  ExprResult ActOnExpressionTrait(ExpressionTrait OET,
                                  SourceLocation KWLoc,
                                  Expr *Queried,
                                  SourceLocation RParen);

  ExprResult BuildExpressionTrait(ExpressionTrait OET,
                                  SourceLocation KWLoc,
                                  Expr *Queried,
                                  SourceLocation RParen);

  ExprResult ActOnStartCXXMemberReference(Scope *S,
                                          Expr *Base,
                                          SourceLocation OpLoc,
                                          tok::TokenKind OpKind,
                                          ParsedType &ObjectType,
                                          bool &MayBePseudoDestructor);

  ExprResult BuildPseudoDestructorExpr(Expr *Base,
                                       SourceLocation OpLoc,
                                       tok::TokenKind OpKind,
                                       const CXXScopeSpec &SS,
                                       TypeSourceInfo *ScopeType,
                                       SourceLocation CCLoc,
                                       SourceLocation TildeLoc,
                                       PseudoDestructorTypeStorage DestroyedType);

  ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
                                       SourceLocation OpLoc,
                                       tok::TokenKind OpKind,
                                       CXXScopeSpec &SS,
                                       UnqualifiedId &FirstTypeName,
                                       SourceLocation CCLoc,
                                       SourceLocation TildeLoc,
                                       UnqualifiedId &SecondTypeName);

  ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
                                       SourceLocation OpLoc,
                                       tok::TokenKind OpKind,
                                       SourceLocation TildeLoc,
                                       const DeclSpec& DS);

  /// MaybeCreateExprWithCleanups - If the current full-expression
  /// requires any cleanups, surround it with a ExprWithCleanups node.
  /// Otherwise, just returns the passed-in expression.
  Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
  Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
  ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);

  MaterializeTemporaryExpr *
  CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
                                 bool BoundToLvalueReference);

  // Convenience overload: uses the expression's own location as the
  // full-expression location.
  ExprResult ActOnFinishFullExpr(Expr *Expr) {
    return ActOnFinishFullExpr(Expr, Expr ? Expr->getExprLoc()
                                          : SourceLocation());
  }
  ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
                                 bool DiscardedValue = false,
                                 bool IsConstexpr = false,
                                 bool IsLambdaInitCaptureInitializer = false);
  StmtResult ActOnFinishFullStmt(Stmt *Stmt);

  // Marks SS invalid if it represents an incomplete type.
  bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);

  DeclContext *computeDeclContext(QualType T);
  DeclContext *computeDeclContext(const CXXScopeSpec &SS,
                                  bool EnteringContext = false);
  bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
  CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);

  /// \brief The parser has parsed a global nested-name-specifier '::'.
  ///
  /// \param CCLoc The location of the '::'.
  ///
  /// \param SS The nested-name-specifier, which will be updated in-place
  /// to reflect the parsed nested-name-specifier.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);

  /// \brief The parser has parsed a '__super' nested-name-specifier.
  ///
  /// \param SuperLoc The location of the '__super' keyword.
  ///
  /// \param ColonColonLoc The location of the '::'.
  ///
  /// \param SS The nested-name-specifier, which will be updated in-place
  /// to reflect the parsed nested-name-specifier.
  ///
  /// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
                                SourceLocation ColonColonLoc,
                                CXXScopeSpec &SS);

  bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
                                       bool *CanCorrect = nullptr);
  NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);

  /// \brief Keeps information about an identifier in a nested-name-spec.
  ///
  struct NestedNameSpecInfo {
    /// \brief The type of the object, if we're parsing nested-name-specifier
    /// in a member access expression.
    ParsedType ObjectType;

    /// \brief The identifier preceding the '::'.
    IdentifierInfo *Identifier;

    /// \brief The location of the identifier.
    SourceLocation IdentifierLoc;

    /// \brief The location of the '::'.
    SourceLocation CCLoc;

    /// \brief Creates info object for the most typical case.
    NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                       SourceLocation ColonColonLoc,
                       ParsedType ObjectType = ParsedType())
      : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
        CCLoc(ColonColonLoc) {
    }

    /// \brief As above, but wraps an already-resolved QualType as the
    /// object type via ParsedType::make.
    NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                       SourceLocation ColonColonLoc, QualType ObjectType)
      : ObjectType(ParsedType::make(ObjectType)), Identifier(II),
        IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
    }
  };

  bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
                                    NestedNameSpecInfo &IdInfo);

  bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
                                   bool EnteringContext, CXXScopeSpec &SS,
                                   NamedDecl *ScopeLookupResult,
                                   bool ErrorRecoveryLookup,
                                   bool *IsCorrectedToColon = nullptr,
                                   bool OnlyNamespace = false);

  /// \brief The parser has parsed a nested-name-specifier 'identifier::'.
  ///
  /// \param S The scope in which this nested-name-specifier occurs.
  ///
  /// \param IdInfo Parser information about an identifier in the
  /// nested-name-spec.
  ///
  /// \param EnteringContext Whether we're entering the context nominated by
  /// this nested-name-specifier.
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// \brief The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
/// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// \brief Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// \brief Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. 
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// \brief Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// \brief Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, bool IsConstexprSpecified); /// \brief Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// \brief Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. 
ParsedType actOnLambdaInitCaptureInitialization(
      SourceLocation Loc, bool ByRef, IdentifierInfo *Id,
      LambdaCaptureInitKind InitKind, Expr *&Init) {
    // DirectInit (fourth argument) is true for every init kind other than
    // copy-initialization.
    return ParsedType::make(buildLambdaInitCaptureInitialization(
        Loc, ByRef, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init));
  }
  QualType buildLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef,
                                                IdentifierInfo *Id,
                                                bool DirectInit, Expr *&Init);

  /// \brief Create a dummy variable within the declcontext of the lambda's
  /// call operator, for name lookup purposes for a lambda init capture.
  ///
  /// CodeGen handles emission of lambda captures, ignoring these dummy
  /// variables appropriately.
  VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
                                          QualType InitCaptureType,
                                          IdentifierInfo *Id,
                                          unsigned InitStyle, Expr *Init);

  /// \brief Build the implicit field for an init-capture.
  FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var);

  /// \brief Note that we have finished the explicit captures for the
  /// given lambda.
  void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);

  /// \brief Introduce the lambda parameters into scope.
  void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope);

  /// \brief Deduce a block or lambda's return type based on the return
  /// statements present in the body.
  void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);

  /// ActOnStartOfLambdaDefinition - This is called just before we start
  /// parsing the body of a lambda; it analyzes the explicit captures and
  /// arguments, and sets up various data-structures for the body of the
  /// lambda.
  void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
                                    Declarator &ParamInfo, Scope *CurScope);

  /// ActOnLambdaError - If there is an error parsing a lambda, this callback
  /// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// \brief Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::LambdaScopeInfo::Capture &From); /// \brief Diagnose if an explicit lambda capture is unused. void DiagnoseUnusedLambdaCapture(const sema::LambdaScopeInfo::Capture &From); /// \brief Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// \brief Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// \brief Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. 
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult 
ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, AttributeList *Attrs = nullptr); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult 
BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// \brief The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// \brief The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// \brief The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// \brief Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// \brief Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// \brief Mark the exception specifications of all virtual member functions /// in the given class as needed. 
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD); /// \brief Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// \brief Check class-level dllimport/dllexport attribute. The caller must /// ensure that referenceDLLExportedClassMethods is called some point later /// when all outer classes of Class are complete. void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); void CheckCompletedCXXClass(CXXRecordDecl *Record); void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, AttributeList *AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void 
MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD, const FunctionProtoType *T); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier 
Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. 
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = 
false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// \brief When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); Decl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); Decl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); Decl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<Decl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// \brief The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid); DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// \brief Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, AttributeList *Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( 
VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization(FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, AttributeList *Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// \brief Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// \brief The template argument was specified in the code or was /// instantiated with some deduced template arguments. 
CTAK_Specified, /// \brief The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// \brief The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// \brief Check that the given template arguments can be be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \returns true if an error occurred, false otherwise. 
bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateArgument(TemplateTemplateParmDecl *Param, TemplateArgumentLoc &Arg, unsigned ArgumentPackIndex); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// \brief Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// \brief We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// \brief We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// \brief We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. 
/// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// \brief Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// \brief Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// \brief The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// \brief An arbitrary expression. UPPC_Expression = 0, /// \brief The base type of a class type. UPPC_BaseType, /// \brief The type of an arbitrary declaration. UPPC_DeclarationType, /// \brief The type of a data member. UPPC_DataMemberType, /// \brief The size of a bit-field. 
UPPC_BitFieldWidth, /// \brief The expression in a static assertion. UPPC_StaticAssertExpression, /// \brief The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// \brief The enumerator value. UPPC_EnumeratorValue, /// \brief A using declaration. UPPC_UsingDeclaration, /// \brief A friend declaration. UPPC_FriendDeclaration, /// \brief A declaration qualifier. UPPC_DeclarationQualifier, /// \brief An initializer. UPPC_Initializer, /// \brief A default argument. UPPC_DefaultArgument, /// \brief The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// \brief The type of an exception. UPPC_ExceptionType, /// \brief Partial specialization. UPPC_PartialSpecialization, /// \brief Microsoft __if_exists. UPPC_IfExists, /// \brief Microsoft __if_not_exists. UPPC_IfNotExists, /// \brief Lambda expression. UPPC_Lambda, /// \brief Block expression, UPPC_Block }; /// \brief Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// \brief If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostc should be emitted. /// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// \brief If the given expression contains an unexpanded parameter /// pack, diagnose the error. 
/// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// \brief If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// \brief If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// \brief If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// \brief If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. 
/// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. 
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// \brief Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. 
/// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// \brief Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. 
Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// \brief Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. 
/// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// \brief Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// \brief Template argument deduction was successful. TDK_Success = 0, /// \brief The declaration was invalid; do nothing. TDK_Invalid, /// \brief Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// \brief Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// \brief Template argument deduction produced inconsistent /// deduced values for the given template parameter. TDK_Inconsistent, /// \brief Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// \brief Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// \brief After substituting deduced template arguments, a dependent /// parameter type did not match the corresponding argument. 
TDK_DeducedMismatch, /// \brief After substituting deduced template arguments, an element of /// a dependent parameter type did not match the corresponding element /// of the corresponding argument (when deducing from an initializer list). TDK_DeducedMismatchNested, /// \brief A non-depnedent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// \brief When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// \brief When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// \brief The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// \brief Checking non-dependent argument conversions failed. TDK_NonDependentConversionFailure, /// \brief Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure, /// \brief CUDA Target attributes do not match. TDK_CUDATargetMismatch }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// brief A function argument from which we performed template argument // deduction for a call. 
  struct OriginalCallArg {
    OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
                    unsigned ArgIdx, QualType OriginalArgType)
        : OriginalParamType(OriginalParamType),
          DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
          OriginalArgType(OriginalArgType) {}

    // The parameter type as written, before any deduction-driven adjustment.
    QualType OriginalParamType;
    // Whether the parameter was decomposed for deduction purposes
    // (e.g. element-wise deduction) -- TODO confirm against callers.
    bool DecomposedParam;
    // Index of the call argument this record corresponds to.
    unsigned ArgIdx;
    // The argument type as seen at the call site.
    QualType OriginalArgType;
  };

  /// \brief Finish template argument deduction for a function template,
  /// checking the deduced arguments and producing the resulting
  /// specialization (reported via \p Specialization).
  TemplateDeductionResult FinishTemplateArgumentDeduction(
      FunctionTemplateDecl *FunctionTemplate,
      SmallVectorImpl<DeducedTemplateArgument> &Deduced,
      unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
      sema::TemplateDeductionInfo &Info,
      SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
      bool PartialOverloading = false,
      llvm::function_ref<bool()> CheckNonDependent = []{ return false; });

  /// \brief Deduce template arguments for a call to a function template,
  /// given the (possibly empty) explicit template arguments and the call
  /// arguments. \p CheckNonDependent is invoked with the non-dependent
  /// parameter types before deduced arguments are committed.
  TemplateDeductionResult DeduceTemplateArguments(
      FunctionTemplateDecl *FunctionTemplate,
      TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
      FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
      bool PartialOverloading,
      llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);

  /// \brief Deduce template arguments when matching against a target
  /// function type (e.g. taking the address of a function template).
  TemplateDeductionResult
  DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                          TemplateArgumentListInfo *ExplicitTemplateArgs,
                          QualType ArgFunctionType,
                          FunctionDecl *&Specialization,
                          sema::TemplateDeductionInfo &Info,
                          bool IsAddressOfFunction = false);

  /// \brief Deduce template arguments for a conversion function template
  /// converting to the given type.
  TemplateDeductionResult
  DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                          QualType ToType,
                          CXXConversionDecl *&Specialization,
                          sema::TemplateDeductionInfo &Info);

  /// \brief Deduce template arguments with no call arguments or target
  /// type, from the explicit template arguments alone.
  TemplateDeductionResult
  DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                          TemplateArgumentListInfo *ExplicitTemplateArgs,
                          FunctionDecl *&Specialization,
                          sema::TemplateDeductionInfo &Info,
                          bool IsAddressOfFunction = false);

  /// \brief Substitute Replacement for \p auto in \p TypeWithAuto
  QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);

  /// \brief Substitute Replacement for auto in TypeWithAuto
  TypeSourceInfo*
SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// \brief Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); /// \brief Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// \brief Declare implicit deduction guides for a class template if we've /// not already done so. void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool 
Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// \brief The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. 
The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template argument determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, } Kind; /// \brief Was the enclosing context a non-instantiation SFINAE context? 
bool SavedInNonInstantiationSFINAEContext; /// \brief The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// \brief The entity that is being synthesized. Decl *Entity; /// \brief The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// \brief The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// \brief The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// \brief The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// \brief The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// \brief The source range that covers the construct that cause /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// \brief Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; }; /// \brief List of active code synthesis contexts. /// /// This vector is treated as a stack. As synthesis of one entity requires /// synthesis of another, additional contexts are pushed onto the stack. 
  SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;

  /// Specializations whose definitions are currently being instantiated.
  llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;

  /// Non-dependent types used in templates that have already been instantiated
  /// by some template instantiation.
  llvm::DenseSet<QualType> InstantiatedNonDependentTypes;

  /// \brief Extra modules inspected when performing a lookup during a template
  /// instantiation. Computed lazily.
  SmallVector<Module*, 16> CodeSynthesisContextLookupModules;

  /// \brief Cache of additional modules that should be used for name lookup
  /// within the current template instantiation. Computed lazily; use
  /// getLookupModules() to get a complete set.
  llvm::DenseSet<Module*> LookupModulesCache;

  /// \brief Get the set of additional modules that should be checked during
  /// name lookup. A module and its imports become visible when instantiating a
  /// template defined within it.
  llvm::DenseSet<Module*> &getLookupModules();

  /// \brief Map from the most recent declaration of a namespace to the most
  /// recent visible declaration of that namespace.
  llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;

  /// \brief Whether we are in a SFINAE context that is not associated with
  /// template instantiation.
  ///
  /// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
  /// of a template instantiation or template argument deduction.
  bool InNonInstantiationSFINAEContext;

  /// \brief The number of \p CodeSynthesisContexts that are not template
  /// instantiations and, therefore, should not be counted as part of the
  /// instantiation depth.
  ///
  /// When the instantiation depth reaches the user-configurable limit
  /// \p LangOptions::InstantiationDepth we will abort instantiation.
  // FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries; /// \brief The depth of the context stack at the point when the most recent /// error or warning was produced. /// /// This value is used to suppress printing of redundant context stacks /// when there are multiple errors or warnings in the same instantiation. // FIXME: Does this belong in Sema? It's tough to implement it anywhere else. unsigned LastEmittedCodeSynthesisContextDepth = 0; /// \brief The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// \brief RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// \brief For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// \brief A stack object to be created when performing template /// instantiation. 
/// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// \brief Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// \brief Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating as part of template /// argument deduction for a class template declaration. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// \brief Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we are substituting prior template arguments into a /// template template parameter. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// \brief Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// \brief Determines whether we have exceeded the maximum /// recursive template instantiations. bool isInvalid() const { return Invalid; } /// \brief Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. 
  bool inTemplateInstantiation() const {
    // Entries beyond NonInstantiationEntries are real instantiation records.
    return CodeSynthesisContexts.size() > NonInstantiationEntries;
  }

  /// \brief Print the current code-synthesis (instantiation) context stack,
  /// suppressing the output when it would merely repeat the stack already
  /// printed for the previous diagnostic at the same depth.
  void PrintContextStack() {
    if (!CodeSynthesisContexts.empty() &&
        CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
      PrintInstantiationStack();
      LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
    }
    // Also note the active pragma-attribute target, if any.
    // NOTE(review): PragmaAttributeCurrentTargetDecl is declared elsewhere in
    // this class -- presumably the decl currently targeted by
    // '#pragma clang attribute'; confirm against its declaration.
    if (PragmaAttributeCurrentTargetDecl)
      PrintPragmaAttributeInstantiationPoint();
  }
  void PrintInstantiationStack();

  void PrintPragmaAttributeInstantiationPoint();

  /// \brief Determines whether we are currently in a context where
  /// template argument substitution failures are not considered
  /// errors.
  ///
  /// \returns An empty \c Optional if we're not in a SFINAE context.
  /// Otherwise, contains a pointer that, if non-NULL, contains the nearest
  /// template-deduction context object, which can be used to capture
  /// diagnostics that will be suppressed.
  Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;

  /// \brief Determines whether we are currently in a context that
  /// is not evaluated as per C++ [expr] p5.
  bool isUnevaluatedContext() const {
    assert(!ExprEvalContexts.empty() &&
           "Must be in an expression evaluation context");
    return ExprEvalContexts.back().isUnevaluated();
  }

  /// \brief RAII class used to determine whether SFINAE has
  /// trapped any errors that occur during template argument
  /// deduction.
  class SFINAETrap {
    Sema &SemaRef;
    // State saved at construction and restored at destruction.
    unsigned PrevSFINAEErrors;
    bool PrevInNonInstantiationSFINAEContext;
    bool PrevAccessCheckingSFINAE;

  public:
    explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
        : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
          PrevInNonInstantiationSFINAEContext(
              SemaRef.InNonInstantiationSFINAEContext),
          PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE) {
      // If we're not already inside a SFINAE context (e.g. template argument
      // deduction), mark this as a non-instantiation SFINAE context so
      // failures are still trapped rather than diagnosed.
      if (!SemaRef.isSFINAEContext())
        SemaRef.InNonInstantiationSFINAEContext = true;
      SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
    }

    ~SFINAETrap() {
      // Restore all saved state; errors counted while the trap was active
      // are discarded from the running total.
      SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
      SemaRef.InNonInstantiationSFINAEContext =
          PrevInNonInstantiationSFINAEContext;
      SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
    }

    /// \brief Determine whether any SFINAE errors have been trapped.
    bool hasErrorOccurred() const {
      return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
    }
  };

  /// \brief RAII class used to indicate that we are performing provisional
  /// semantic analysis to determine the validity of a construct, so
  /// typo-correction and diagnostics in the immediate context (not within
  /// implicitly-instantiated templates) should be suppressed.
  class TentativeAnalysisScope {
    Sema &SemaRef;
    // FIXME: Using a SFINAETrap for this is a hack.
    SFINAETrap Trap;
    bool PrevDisableTypoCorrection;
  public:
    explicit TentativeAnalysisScope(Sema &SemaRef)
        : SemaRef(SemaRef), Trap(SemaRef, true),
          PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
      SemaRef.DisableTypoCorrection = true;
    }
    ~TentativeAnalysisScope() {
      SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
    }
  };

  /// \brief The current instantiation scope used to store local
  /// variables.
  LocalInstantiationScope *CurrentInstantiationScope;

  /// \brief Tracks whether we are in a context where typo correction is
  /// disabled.
  bool DisableTypoCorrection;

  /// \brief The number of typos corrected by CorrectTypo.
  unsigned TyposCorrected;

  typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
  typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;

  /// \brief A cache containing identifiers for which typo correction failed and
  /// their locations, so that repeated attempts to correct an identifier in a
  /// given location are ignored if typo correction already failed for it.
  IdentifierSourceLocations TypoCorrectionFailures;

  /// \brief Worker object for performing CFG-based warnings.
  sema::AnalysisBasedWarnings AnalysisWarnings;
  threadSafety::BeforeSet *ThreadSafetyDeclCache;

  /// \brief An entity for which implicit template instantiation is required.
  ///
  /// The source location associated with the declaration is the first place in
  /// the source code where the declaration was "used". It is not necessarily
  /// the point of instantiation (which will be either before or after the
  /// namespace-scope declaration that triggered this implicit instantiation),
  /// However, it is the location that diagnostics should generally refer to,
  /// because users will need to know what code triggered the instantiation.
  typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;

  /// \brief The queue of implicit template instantiations that are required
  /// but have not yet been performed.
  std::deque<PendingImplicitInstantiation> PendingInstantiations;

  /// RAII scope that, when enabled, takes ownership of the pending
  /// instantiation and vtable-use queues, performs them on perform(), and
  /// restores the saved queues on destruction.
  class GlobalEagerInstantiationScope {
  public:
    GlobalEagerInstantiationScope(Sema &S, bool Enabled)
        : S(S), Enabled(Enabled) {
      if (!Enabled) return;

      // Take over the global queues; they must be empty again before the
      // scope is destroyed (asserted in the destructor).
      SavedPendingInstantiations.swap(S.PendingInstantiations);
      SavedVTableUses.swap(S.VTableUses);
    }

    void perform() {
      if (Enabled) {
        S.DefineUsedVTables();
        S.PerformPendingInstantiations();
      }
    }

    ~GlobalEagerInstantiationScope() {
      if (!Enabled) return;

      // Restore the set of pending vtables.
      assert(S.VTableUses.empty() &&
             "VTableUses should be empty before it is discarded.");
      S.VTableUses.swap(SavedVTableUses);

      // Restore the set of pending implicit instantiations.
      assert(S.PendingInstantiations.empty() &&
             "PendingInstantiations should be empty before it is discarded.");
      S.PendingInstantiations.swap(SavedPendingInstantiations);
    }

  private:
    Sema &S;
    SmallVector<VTableUse, 16> SavedVTableUses;
    std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
    bool Enabled;
  };

  /// \brief The queue of implicit template instantiations that are required
  /// and must be performed within the current local scope.
  ///
  /// This queue is only used for member functions of local classes in
  /// templates, which must be instantiated in the same scope as their
  /// enclosing function, so that they can reference function-local
  /// types, static variables, enumerators, etc.
  std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;

  /// RAII scope for the local (function-scope) analogue of
  /// GlobalEagerInstantiationScope.
  // NOTE(review): single-argument constructor is not 'explicit'; consider
  // whether implicit conversion from Sema& is intended.
  class LocalEagerInstantiationScope {
  public:
    LocalEagerInstantiationScope(Sema &S) : S(S) {
      SavedPendingLocalImplicitInstantiations.swap(
          S.PendingLocalImplicitInstantiations);
    }

    void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

    ~LocalEagerInstantiationScope() {
      assert(S.PendingLocalImplicitInstantiations.empty() &&
             "there shouldn't be any pending local implicit instantiations");
      SavedPendingLocalImplicitInstantiations.swap(
          S.PendingLocalImplicitInstantiations);
    }

  private:
    Sema &S;
    std::deque<PendingImplicitInstantiation>
        SavedPendingLocalImplicitInstantiations;
  };

  /// A helper class for building up ExtParameterInfos.
  class ExtParameterInfoBuilder {
    SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
    bool HasInteresting = false;

  public:
    /// Set the ExtParameterInfo for the parameter at the given index.
    /// Indices must be set in increasing order (asserted below); gaps are
    /// filled with default-constructed infos by the resize.
    void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
      assert(Infos.size() <= index);
      Infos.resize(index);
      Infos.push_back(info);

      if (!HasInteresting)
        HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
    }

    /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
    /// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, unsigned ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. 
/// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const 
MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, 
LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateStaticDataMemberDefinition( SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. 
// Kind of Objective-C container the semantic analyzer is currently in.
enum ObjCContainerKind {
  OCK_None = -1,
  OCK_Interface = 0,
  OCK_Protocol,
  OCK_Category,
  OCK_ClassExtension,
  OCK_Implementation,
  OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;

// Objective-C type parameters (lightweight generics).
DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);

// Parser actions for @interface / @protocol / @implementation containers.
Decl *ActOnStartClassInterface(Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl * const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, AttributeList *AttrList);
void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl * const *ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, AttributeList *AttrList);
Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl * const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, AttributeList *AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls);

// Forward declarations: @class / @protocol name lists.
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts);
DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc, ArrayRef<IdentifierLocPair> IdentList, AttributeList *attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false);

/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols);

/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc);

/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc);

/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false);

/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false);

/// Check the application of the Objective-C '__kindof' qualifier to
/// the given type.
bool checkObjCKindOfType(QualType &type, SourceLocation loc);

/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass);

/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);

void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID);

// @end handling and @property parsing actions.
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind);

// Method families with special semantics (used by the ARC checks below).
enum ObjCSpecialMethodKind {
  OSMK_None,
  OSMK_Alloc,
  OSMK_New,
  OSMK_Copy,
  OSMK_RetainingInit,
  OSMK_NonRetainingInit
};

// Per-argument parser payload for an Objective-C method declaration.
struct ObjCArgInfo {
  IdentifierInfo *Name;
  SourceLocation NameLoc;
  // The Type is null if no type was specified, and the DeclSpec is invalid
  // in this case.
  ParsedType Type;
  ObjCDeclSpec DeclSpec;

  /// ArgAttrs - Attribute list for this argument.
  AttributeList *ArgAttrs;
};

Decl *ActOnMethodDeclaration(
    Scope *S,
    SourceLocation BeginLoc, // location of the + or -.
    SourceLocation EndLoc,   // location of the ; or {.
    tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
    ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
    // optional arguments. The number of types/arguments is obtained
    // from the Sel.getNumArgs().
    ObjCArgInfo *ArgInfo,
    DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args
    AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind,
    bool isVariadic, bool MethodDefinition);

// Method lookup helpers.
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance);

bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);

// Property reference expressions.
ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super);
ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc);

ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);

/// \brief Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
  /// \brief The message is sent to 'super'.
  ObjCSuperMessage,
  /// \brief The message is an instance message.
  ObjCInstanceMessage,
  /// \brief The message is a class message, and the identifier is a type
  /// name.
  ObjCClassMessage
};

ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType);

// Message-send construction: 'super', class, and instance messages.
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args);

// Objective-C bridged casts (__bridge and friends).
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr);

void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr, bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);

/// \brief Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden);

/// \brief Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
  RTC_Compatible,
  RTC_Incompatible,
  RTC_Unknown
};

void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC);

// Alignment modes for '#pragma options align'.
enum PragmaOptionsAlignKind {
  POAK_Native,  // #pragma options align=native
  POAK_Natural, // #pragma options align=natural
  POAK_Packed,  // #pragma options align=packed
  POAK_Power,   // #pragma options align=power
  POAK_Mac68k,  // #pragma options align=mac68k
  POAK_Reset    // #pragma options align=reset
};

/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName);

/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc);

/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment);

/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);

/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg);

/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc);

/// \brief Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispAttr::Mode Value);

// Section kinds for the #pragma section-family handlers below.
enum PragmaSectionKind {
  PSK_DataSeg,
  PSK_BSSSeg,
  PSK_ConstSeg,
  PSK_CodeSeg,
};

bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation);

/// \brief Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName);

/// \brief Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName);

/// \brief Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName);

/// \brief Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);

/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value);

/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc);

/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc);

// #pragma weak / redefine_extname support.
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);

/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc);

/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc);

/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc);

/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);

/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);

/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);

/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();

/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc);

/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);

/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);

/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();

/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);

/// \brief Called on well-formed '\#pragma clang attribute push'.
void ActOnPragmaAttributePush(AttributeList &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules);

/// \brief Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc);

/// \brief Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);

void DiagnoseUnterminatedPragmaAttribute();

/// \brief Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);

/// \brief Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; }

/// \brief Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);

/// \brief Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);

/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T, unsigned SpellingListIndex, bool IsPackExpansion);

/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE, unsigned SpellingListIndex);

/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr, unsigned SpellingListIndex);

/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex);

/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads, Expr *MinBlocks, unsigned SpellingListIndex);

/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name, unsigned SpellingListIndex, bool InInstantiation = false);

void AddParameterABIAttr(SourceRange AttrRange, Decl *D, ParameterABI ABI, unsigned SpellingListIndex);

void AddNSConsumedAttr(SourceRange AttrRange, Decl *D, unsigned SpellingListIndex, bool isNSConsumed, bool isTemplateInstantiation);

bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);

//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);

ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);

VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);

//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap; public: llvm::StringRef getCurrentOpenCLExtension() const { return CurrOpenCLExtension; } void setCurrentOpenCLExtension(llvm::StringRef Ext) { CurrOpenCLExtension = Ext; } /// \brief Set OpenCL extensions for a type which can only be used when these /// OpenCL extensions are enabled. If \p Exts is empty, do nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts); /// \brief Set OpenCL extensions for a declaration which can only be /// used when these OpenCL extensions are enabled. If \p Exts is empty, do /// nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts); /// \brief Set current OpenCL extensions for a type which can only be used /// when these OpenCL extensions are enabled. If current OpenCL extension is /// empty, do nothing. void setCurrentOpenCLExtensionForType(QualType T); /// \brief Set current OpenCL extensions for a declaration which /// can only be used when these OpenCL extensions are enabled. If current /// OpenCL extension is empty, do nothing. void setCurrentOpenCLExtensionForDecl(Decl *FD); bool isOpenCLDisabledDecl(Decl *FD); /// \brief Check if type \p T corresponding to declaration specifier \p DS /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T); /// \brief Check if declaration \p D used by expression \p E /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. 
//
private:
void *VarDataSharingAttributesStack;
/// Set to true inside '#pragma omp declare target' region.
bool IsInOpenMPDeclareTargetContext = false;

/// \brief Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true);

/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;

/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();

/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);

/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange());

public:
/// \brief Return true if the provided declaration \a VD should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
bool IsOpenMPCapturedByRef(ValueDecl *D, unsigned Level);

/// \brief Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *IsOpenMPCapturedDecl(ValueDecl *D);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc);

/// \brief Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
bool isOpenMPPrivateDecl(ValueDecl *D, unsigned Level);

/// \brief Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
bool isOpenMPTargetCapturedDecl(ValueDecl *D, unsigned Level);

ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op);

/// \brief Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc);
/// \brief Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// \brief End analysis of clauses.
void EndOpenMPClause();
/// \brief Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);

/// \brief Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);

// OpenMP directives and clauses.
/// \brief Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id);
/// \brief Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList);
/// \brief Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList);

/// \brief Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType);
/// \brief Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// \brief Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// \brief Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// \brief Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// \brief Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer);
/// \brief Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);

/// Called on the start of target region i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of target region i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OMPDeclareTargetDeclAttr::MapTypeTy MT, NamedDeclSetType &SameDirectiveDecls);
/// Check declaration inside target region.
void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D);
/// Return true inside OpenMP target region.
bool isInOpenMPDeclareTargetContext() const {
  // Simple accessor over the flag maintained by the Start/Finish pair above.
  return IsInOpenMPDeclareTargetContext;
}

/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);

/// \brief Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// \brief End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
/// Generic entry point: build an executable OpenMP directive of kind
/// \p Kind from its clauses and associated statement.
StmtResult ActOnOpenMPExecutableDirective(
    OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
    OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
    Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc,
    llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPForDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc,
    llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPForSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc,
    llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
                                       SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
                                      Stmt *AStmt, SourceLocation StartLoc,
                                      SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
                                      SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
                                        ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc,
    llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc,
    llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
                                                Stmt *AStmt,
                                                SourceLocation StartLoc,
                                                SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
                                    Stmt *AStmt, SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
                                         SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
                                       SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
                                         Stmt *AStmt, SourceLocation StartLoc,
                                         SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
                                     SourceLocation StartLoc,
                                     SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
                                       Stmt *AStmt, SourceLocation StartLoc,
                                       SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
                                      Stmt *AStmt, SourceLocation StartLoc,
                                      SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
                                      Stmt *AStmt, SourceLocation StartLoc,
                                      SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
                                          Stmt *AStmt,
                                          SourceLocation StartLoc,
                                          SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
                                               SourceLocation StartLoc,
                                               SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
                                              SourceLocation StartLoc,
                                              SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
                                              Stmt *AStmt,
                                              SourceLocation StartLoc,
                                              SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc,
    llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
                                     Stmt *AStmt, SourceLocation StartLoc,
                                     SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
                                      SourceLocation EndLoc,
                                      OpenMPDirectiveKind CancelRegion);
/// \brief Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
                                      SourceLocation StartLoc,
                                      SourceLocation EndLoc,
                                      OpenMPDirectiveKind CancelRegion);
/// \brief Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskLoopDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc,
    llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc,
    llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPDistributeDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc,
    llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
                                            SourceLocation StartLoc,
                                            SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc,
    llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc,
    llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc,
    llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc,
    llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// \brief Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc,
    llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc,
    llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc,
    llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc,
    llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc,
    llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
                                           Stmt *AStmt,
                                           SourceLocation StartLoc,
                                           SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc,
    llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc,
    llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc,
    llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc,
    llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA);

/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
                               SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(ValueDecl *D, SourceLocation ELoc,
                           OpenMPLinearClauseKind LinKind, QualType Type);

/// \brief Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
    DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
    Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
    ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
    ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);

/// Generic entry point for clauses that hold a single expression argument.
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
/// \brief Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
                               Expr *Condition, SourceLocation StartLoc,
                               SourceLocation LParenLoc,
                               SourceLocation NameModifierLoc,
                               SourceLocation ColonLoc,
                               SourceLocation EndLoc);
/// \brief Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc);
/// \brief Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
/// \brief Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// \brief Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// \brief Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
/// \brief Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
                         SourceLocation LParenLoc = SourceLocation(),
                         Expr *NumForLoops = nullptr);
/// \brief Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
/// \brief Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
/// \brief Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc);

/// Generic entry point for clauses that take a single enumeration argument.
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument,
                                   SourceLocation ArgumentLoc,
                                   SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);
/// \brief Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
                                    SourceLocation KindLoc,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// \brief Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
                                     SourceLocation KindLoc,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);

/// Generic entry point for clauses that hold one expression plus one or more
/// enumeration arguments.
OMPClause *ActOnOpenMPSingleExprWithArgClause(
    OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
    SourceLocation StartLoc, SourceLocation LParenLoc,
    ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
    SourceLocation EndLoc);
/// \brief Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
    OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
    OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
    SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
    SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);

/// Generic entry point for clauses that carry no arguments.
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
                             SourceLocation EndLoc);
/// \brief Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
/// \brief Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
/// \brief Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
/// \brief Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
                                 SourceLocation EndLoc);
/// \brief Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
                                  SourceLocation EndLoc);
/// \brief Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
/// \brief Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// \brief Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
/// \brief Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// \brief Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
                                 SourceLocation EndLoc);
/// \brief Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);

/// Generic entry point for clauses that hold a list of variables.
OMPClause *ActOnOpenMPVarListClause(
    OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
    SourceLocation StartLoc, SourceLocation LParenLoc,
    SourceLocation ColonLoc, SourceLocation EndLoc,
    CXXScopeSpec &ReductionIdScopeSpec,
    const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind,
    OpenMPLinearClauseKind LinKind, OpenMPMapClauseKind MapTypeModifier,
    OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
    SourceLocation DepLinMapLoc);
/// \brief Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// \brief Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
                                         SourceLocation StartLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation EndLoc);
/// \brief Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
                                        SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);
/// \brief Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
                                   SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);
/// \brief Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
    ArrayRef<Expr *> VarList, SourceLocation StartLoc,
    SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
    CXXScopeSpec &ReductionIdScopeSpec,
    const DeclarationNameInfo &ReductionId,
    ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
    ArrayRef<Expr *> VarList, SourceLocation StartLoc,
    SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
    CXXScopeSpec &ReductionIdScopeSpec,
    const DeclarationNameInfo &ReductionId,
    ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// \brief Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
                        SourceLocation StartLoc, SourceLocation LParenLoc,
                        OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
                        SourceLocation ColonLoc, SourceLocation EndLoc);
/// \brief Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
                                    Expr *Alignment, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation ColonLoc,
                                    SourceLocation EndLoc);
/// \brief Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
                                   SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);
/// \brief Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
                                        SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);
/// \brief Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
                                  SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc);
/// \brief Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
                        SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
                        SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation EndLoc);
/// \brief Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);
/// \brief Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(OpenMPMapClauseKind MapTypeModifier,
                     OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
                     SourceLocation MapLoc, SourceLocation ColonLoc,
                     ArrayRef<Expr *> VarList, SourceLocation StartLoc,
                     SourceLocation LParenLoc, SourceLocation EndLoc);
/// \brief Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
/// \brief Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
                                        SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);
/// \brief Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
/// \brief Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
    OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
    SourceLocation StartLoc, SourceLocation LParenLoc,
    SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
/// \brief Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
    OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
    SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
    SourceLocation KindLoc, SourceLocation EndLoc);
/// \brief Called on well-formed 'to' clause.
OMPClause *ActOnOpenMPToClause(ArrayRef<Expr *> VarList,
                               SourceLocation StartLoc,
                               SourceLocation LParenLoc,
                               SourceLocation EndLoc);
/// \brief Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(ArrayRef<Expr *> VarList,
                                 SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
                                         SourceLocation StartLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation EndLoc);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
                                        SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);

/// \brief The kind of conversion being performed.
enum CheckedConversionKind {
  /// \brief An implicit conversion.
  CCK_ImplicitConversion,
  /// \brief A C-style cast.
  CCK_CStyleCast,
  /// \brief A functional-style cast.
  CCK_FunctionalCast,
  /// \brief A cast other than a C-style cast.
  CCK_OtherCast
};

/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
                             ExprValueKind VK = VK_RValue,
                             const CXXCastPath *BasePath = nullptr,
                             CheckedConversionKind CCK
                                 = CCK_ImplicitConversion);

/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);

/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);

// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);

/// CallExprUnaryConversions - a special case of an unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);

// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);

// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
                                                bool Diagnose = true);

// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);

// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);

/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);

// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
  VariadicFunction,
  VariadicBlock,
  VariadicMethod,
  VariadicConstructor,
  VariadicDoesNotApply
};

/// Classify the call expression \p Fn (with declaration \p FDecl and
/// prototype \p Proto) for variadic-argument diagnostics.
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
                                     const FunctionProtoType *Proto,
                                     Expr *Fn);

// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
  VAK_Valid,
  VAK_ValidInCXX11,
  VAK_Undefined,
  VAK_MSVCUndefined,
  VAK_Invalid
};

// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);

/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);

/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);

/// GatherArgumentsForCall - Collect argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
                            const FunctionProtoType *Proto,
                            unsigned FirstParam, ArrayRef<Expr *> Args,
                            SmallVectorImpl<Expr *> &AllArgs,
                            VariadicCallType CallType = VariadicDoesNotApply,
                            bool AllowExplicit = false,
                            bool IsListInitialization = false);

// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                            FunctionDecl *FDecl);

// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                    bool IsCompAssign = false);

/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
  /// Compatible - the types are compatible according to the standard.
  Compatible,

  /// PointerToInt - The assignment converts a pointer to an int, which we
  /// accept as an extension.
  PointerToInt,

  /// IntToPointer - The assignment converts an int to a pointer, which we
  /// accept as an extension.
  IntToPointer,

  /// FunctionVoidPointer - The assignment is between a function pointer and
  /// void*, which the standard doesn't allow, but we accept as an extension.
  FunctionVoidPointer,

  /// IncompatiblePointer - The assignment is between two pointers types that
  /// are not compatible, but we accept them as an extension.
  IncompatiblePointer,

  /// IncompatiblePointerSign - The assignment is between two pointers types
  /// which point to integers which have a different sign, but are otherwise
  /// identical. This is a subset of the above, but broken out because it's by
  /// far the most common case of incompatible pointers.
  IncompatiblePointerSign,

  /// CompatiblePointerDiscardsQualifiers - The assignment discards
  /// c/v/r qualifiers, which we accept as an extension.
  CompatiblePointerDiscardsQualifiers,

  /// IncompatiblePointerDiscardsQualifiers - The assignment
  /// discards qualifiers that we don't permit to be discarded,
  /// like address spaces.
  IncompatiblePointerDiscardsQualifiers,

  /// IncompatibleNestedPointerQualifiers - The assignment is between two
  /// nested pointer types, and the qualifiers other than the first two
  /// levels differ e.g. char ** -> const char **, but we accept them as an
  /// extension.
  IncompatibleNestedPointerQualifiers,

  /// IncompatibleVectors - The assignment is between two vector types that
  /// have the same size, which we accept as an extension.
  IncompatibleVectors,

  /// IntToBlockPointer - The assignment converts an int to a block
  /// pointer. We disallow this.
  IntToBlockPointer,

  /// IncompatibleBlockPointer - The assignment is between two block
  /// pointers types that are not compatible.
  IncompatibleBlockPointer,

  /// IncompatibleObjCQualifiedId - The assignment is between a qualified
  /// id type and something else (that is incompatible with it). For example,
  /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
  IncompatibleObjCQualifiedId,

  /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
  /// object with __weak qualifier.
  IncompatibleObjCWeakRef,

  /// Incompatible - We reject this conversion outright, it is invalid to
  /// represent it in the AST.
  Incompatible
};

/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc,
                              QualType DstType, QualType SrcType,
                              Expr *SrcExpr, AssignmentAction Action,
                              bool *Complained = nullptr);

/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
                       bool AllowMask) const;

/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
                            Expr *SrcExpr);

/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
                                             QualType LHSType,
                                             QualType RHSType);

/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
                                             ExprResult &RHS, CastKind &Kind,
                                             bool ConvertRHS = true);

/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
///        for assignability. If a diagnostic is produced, \p RHS will be
///        set to ExprError(). Note that this function may still return
///        without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
///        in an audited Core Foundation API and does not need to be checked
///        for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // \brief If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). 
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
                         ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
                                      ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
    ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
    SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    bool IsCompAssign, bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    BinaryOperatorKind Opc, QualType *CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    QualType *CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    BinaryOperatorKind Opc, bool IsCompAssign = false);
QualType CheckCompareOperands( // C99 6.5.8/9
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    BinaryOperatorKind Opc, bool isRelational);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    BinaryOperatorKind Opc);

// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
    Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc,
    QualType CompoundType);

// Pseudo-object expression checking helpers (see PseudoObjectExpr).
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
                                   UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
                                       BinaryOperatorKind Opcode,
                                       Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);

QualType CheckConditionalOperands( // C99 6.5.15
    ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
    ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
    ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
    ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);

QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
                                  bool ConvertArgs = true);
/// Convenience overload: forwards to the Expr*& overload and writes the
/// (possibly converted) operands back into the ExprResults.
QualType FindCompositePointerType(SourceLocation Loc,
                                  ExprResult &E1, ExprResult &E2,
                                  bool ConvertArgs = true) {
  Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
  QualType Composite =
      FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
  E1 = E1Tmp;
  E2 = E2Tmp;
  return Composite;
}

QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation QuestionLoc);

bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
                                SourceLocation QuestionLoc);

void DiagnoseAlwaysNonNullPointer(Expr *E,
                                  Expr::NullPointerConstantKind NullType,
                                  bool IsEqual, SourceRange Range);

/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
                             SourceLocation Loc, bool IsCompAssign,
                             bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
                                    SourceLocation Loc, bool isRelational);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
                                    SourceLocation Loc);

// Lax (bitcast-style) vector conversion queries.
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);

/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);

// type checking C++ declaration initializers (C++ [dcl.init]).

/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
  /// Ref_Incompatible - The two types are incompatible, so direct
  /// reference binding is not possible.
  Ref_Incompatible = 0,
  /// Ref_Related - The two types are reference-related, which means
  /// that their unqualified forms (T1 and T2) are either the same
  /// or T1 is a base class of T2.
  Ref_Related,
  /// Ref_Compatible - The two types are reference-compatible.
  Ref_Compatible
};

ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
                             bool &DerivedToBase, bool &ObjCConversion,
                             bool &ObjCLifetimeConversion);

ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
                               Expr *CastExpr, CastKind &CastKind,
                               ExprValueKind &VK, CXXCastPath &Path);

/// \brief Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);

/// \brief Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
                              Expr *result, QualType &paramType);

// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
                     CastKind &Kind);

/// \brief Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);

// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
                              CastKind &Kind);

ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
                                      SourceLocation LParenLoc,
                                      Expr *CastExpr,
                                      SourceLocation RParenLoc);

enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };

/// \brief Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
                                        QualType castType, Expr *&op,
                                        CheckedConversionKind CCK,
                                        bool Diagnose = true,
                                        bool DiagnoseCFAudited = false,
                                        BinaryOperatorKind Opc = BO_PtrMemD);

Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);

bool CheckObjCARCUnavailableWeakConversion(QualType castType,
                                           QualType ExprType);

/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// \brief Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// \brief If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// \brief Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. 
void EmitRelatedResultTypeNoteForReturn(QualType destType);

/// Result of processing a statement condition (e.g. of an 'if'): bundles the
/// condition variable (if any), the converted condition expression, and —
/// for constexpr conditions whose value is known — the evaluated boolean.
class ConditionResult {
  Decl *ConditionVar;
  FullExprArg Condition;
  bool Invalid;
  // True only for constexpr conditions with a non-value-dependent
  // expression; KnownValue is then its evaluated truth value.
  bool HasKnownValue;
  bool KnownValue;

  friend class Sema;
  ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
                  bool IsConstexpr)
      : ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
        HasKnownValue(IsConstexpr && Condition.get() &&
                      !Condition.get()->isValueDependent()),
        KnownValue(HasKnownValue &&
                   !!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
  explicit ConditionResult(bool Invalid)
      : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
        HasKnownValue(false), KnownValue(false) {}

public:
  ConditionResult() : ConditionResult(false) {}
  bool isInvalid() const { return Invalid; }
  std::pair<VarDecl *, Expr *> get() const {
    return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
                          Condition.get());
  }
  llvm::Optional<bool> getKnownValue() const {
    if (!HasKnownValue)
      return None;
    return KnownValue;
  }
};

static ConditionResult ConditionError() { return ConditionResult(true); }

enum class ConditionKind {
  Boolean,     ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
  ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
  Switch       ///< An integral condition for a 'switch' statement.
};

ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
                               Expr *SubExpr, ConditionKind CK);

ConditionResult ActOnConditionVariable(Decl *ConditionVar,
                                       SourceLocation StmtLoc,
                                       ConditionKind CK);

DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);

ExprResult CheckConditionVariable(VarDecl *ConditionVar,
                                  SourceLocation StmtLoc,
                                  ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);

/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement).  Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
                                 bool IsConstexpr = false);

/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);

/// \brief Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);

/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr,
                                    bool IsConstexpr = false);

/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
                                        unsigned NewWidth, bool NewSign,
                                        SourceLocation Loc, unsigned DiagID);

/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);

/// \brief Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
  // NOTE(review): presumably suppresses the diagnose* callbacks below —
  // confirm against VerifyIntegerConstantExpression's implementation.
  bool Suppress;

  VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }

  virtual void diagnoseNotICE(Sema &S, SourceLocation Loc,
                              SourceRange SR) = 0;
  virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
  virtual ~VerifyICEDiagnoser() { }
};

/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr); /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. /// Can optionally return whether the bit-field is of width 0 ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); private: unsigned ForceCUDAHostDeviceDepth = 0; public: /// Increments our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. So long as this count is greater /// than zero, all functions encountered will be __host__ __device__. void PushForceCUDAHostDevice(); /// Decrements our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. Returns false if the count is 0 /// before incrementing, so you can emit an error. bool PopForceCUDAHostDevice(); /// Diagnostics that are emitted only if we discover that the given function /// must be codegen'ed. Because handling these correctly adds overhead to /// compilation, this is currently only enabled for CUDA compilations. llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> CUDADeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. 
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;

/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
               /* Caller = */ FunctionDeclAndLoc>
    CUDAKnownEmittedFns;

/// A partial call graph maintained during CUDA compilation to support
/// deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they
/// are not known-emitted.  As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from
/// this set and add those functions to CUDAKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
               /* Callees = */ llvm::MapVector<
                   CanonicalDeclPtr<FunctionDecl>, SourceLocation>>
    CUDACallGraph;

/// Diagnostic builder for CUDA errors which may or may not be deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the
/// host+device function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's
/// codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass
/// to its constructor, thus simplifying the process of creating these
/// "maybe deferred" diagnostics.
class CUDADiagBuilder {
public:
  enum Kind {
    /// Emit no diagnostics.
    K_Nop,
    /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
    K_Immediate,
    /// Emit the diagnostic immediately, and, if it's a warning or error,
    /// also emit a call stack showing how this function can be reached by
    /// an a priori known-emitted function.
    K_ImmediateWithCallStack,
    /// Create a deferred diagnostic, which is emitted only if the function
    /// it's attached to is codegen'ed.  Also emit a call stack as with
    /// K_ImmediateWithCallStack.
    K_Deferred
  };

  CUDADiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
                  FunctionDecl *Fn, Sema &S);
  ~CUDADiagBuilder();

  /// Convertible to bool: True if we immediately emitted an error, false if
  /// we didn't emit an error or we created a deferred error.
  ///
  /// Example usage:
  ///
  ///   if (CUDADiagBuilder(...) << foo << bar)
  ///     return ExprError();
  ///
  /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you
  /// probably want to use these instead of creating a CUDADiagBuilder
  /// yourself.
  operator bool() const { return ImmediateDiag.hasValue(); }

  template <typename T>
  friend const CUDADiagBuilder &operator<<(const CUDADiagBuilder &Diag,
                                           const T &Value) {
    if (Diag.ImmediateDiag.hasValue())
      *Diag.ImmediateDiag << Value;
    else if (Diag.PartialDiag.hasValue())
      *Diag.PartialDiag << Value;
    return Diag;
  }

private:
  Sema &S;
  SourceLocation Loc;
  unsigned DiagID;
  FunctionDecl *Fn;
  bool ShowCallStack;

  // Invariant: At most one of these Optionals has a value.
  // FIXME: Switch these to a Variant once that exists.
  llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
  llvm::Optional<PartialDiagnostic> PartialDiag;
};

/// Creates a CUDADiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
///   diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling
///   for the device, creates a diagnostic which is emitted if and when we
///   realize that the function will be codegen'ed.
/// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. /// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. CUDADiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a CUDADiagBuilder that emits the diagnostic if the current context /// is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. CUDADiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const AttributeList *Attr); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. 
/// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas declared inside __device__ or __global__ functions inherit /// the __device__ attribute. Similarly, lambdas inside __host__ __device__ /// functions become __host__ __device__ themselves. 
void CUDASetLambdaAttrs(CXXMethodDecl *Method);

/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
    const FunctionDecl *Caller,
    SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);

/// Given a implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
///        its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
                                             CXXSpecialMember CSM,
                                             CXXMethodDecl *MemberDecl,
                                             bool ConstRHS,
                                             bool Diagnose);

/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);

/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
                             const LookupResult &Previous);

/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD,
                            const FunctionTemplateDecl &TD);

/// \name Code completion
//@{
/// \brief Describes the context in which code completion occurs.
enum ParserCompletionContext {
  /// \brief Code completion occurs at top-level or namespace context.
  PCC_Namespace,
  /// \brief Code completion occurs within a class, struct, or union.
  PCC_Class,
  /// \brief Code completion occurs within an Objective-C interface,
  /// protocol, or category.
  PCC_ObjCInterface,
  /// \brief Code completion occurs within an Objective-C implementation or
  /// category implementation
  PCC_ObjCImplementation,
  /// \brief Code completion occurs within the list of instance variables
  /// in an Objective-C interface, protocol, category, or implementation.
  PCC_ObjCInstanceVariableList,
  /// \brief Code completion occurs following one or more template
  /// headers.
  PCC_Template,
  /// \brief Code completion occurs following one or more template
  /// headers within a class.
  PCC_MemberTemplate,
  /// \brief Code completion occurs within an expression.
  PCC_Expression,
  /// \brief Code completion occurs within a statement, which may
  /// also be an expression or a declaration.
  PCC_Statement,
  /// \brief Code completion occurs at the beginning of the
  /// initialization statement (or expression) in a for loop.
  PCC_ForInit,
  /// \brief Code completion occurs within the condition of an if,
  /// while, switch, or for statement.
  PCC_Condition,
  /// \brief Code completion occurs within the body of a function on a
  /// recovery path, where we do not have a specific handle on our position
  /// in the grammar.
  PCC_RecoveryInFunction,
  /// \brief Code completion occurs where only a type is permitted.
  PCC_Type,
  /// \brief Code completion occurs in a parenthesized expression, which
  /// might also be a type cast.
  PCC_ParenthesizedExpression,
  /// \brief Code completion occurs within a sequence of declaration
  /// specifiers within a function, method, or block.
  PCC_LocalDeclarationSpecifiers
};

void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
                              ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers,
                          bool AllowNestedNameSpecifiers);

struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
                            const CodeCompleteExpressionData &Data);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
                                     SourceLocation OpLoc, bool IsArrow,
                                     bool IsBaseExprStatement);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
                                    const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args);
void CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc,
                             ArrayRef<Expr *> Args);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteReturn(Scope *S);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS);

void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
                             bool EnteringContext);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
    Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers);

void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
                                  bool AfterAmpersand);

// Objective-C-specific completion entry points.
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
                                 bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
                                  ArrayRef<IdentifierInfo *> SelIdents,
                                  bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
                                  ArrayRef<IdentifierInfo *> SelIdents,
                                  bool AtArgumentExpression,
                                  bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
                                     ArrayRef<IdentifierInfo *> SelIdents,
                                     bool AtArgumentExpression,
                                     ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
                              ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
    ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName,
                                SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName,
                                       SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
                                            IdentifierInfo *ClassName,
                                            SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
                                            IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, bool IsInstanceMethod,
                                ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(
    Scope *S, bool IsInstanceMethod, bool AtParameterName,
    ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S,
                                          IdentifierInfo &ClassName,
                                          SourceLocation ClassNameLoc,
                                          bool IsBaseExprStatement);

// Preprocessor completion entry points.
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro,
                                           MacroInfo *MacroInfo,
                                           unsigned Argument);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(
    CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo,
    SmallVectorImpl<CodeCompletionResult> &Results);
//@}

//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system

public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
                                              unsigned ByteNo) const;

private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
                      const ArraySubscriptExpr *ASE = nullptr,
                      bool AllowOnePastEnd = true, bool IndexNegated = false);
void CheckArrayAccess(const Expr *E);

// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
  unsigned FormatIdx;
  unsigned FirstDataArg;
  bool HasVAListArg;
};

static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
                                FormatStringInfo *FSI);

// Call checking dispatched by callee kind.
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
                       const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
                         ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
                      const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
                          ArrayRef<const Expr *> Args,
                          const FunctionProtoType *Proto,
                          SourceLocation Loc);

void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
               const Expr *ThisArg, ArrayRef<const Expr *> Args,
               bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
               VariadicCallType CallType);

bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);

// Per-target builtin call checkers.
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
                                    unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
                                  unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID,
                                       CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);

// Checkers for individual builtins.
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARM(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);

public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
                                 SourceLocation BuiltinLoc,
                                 SourceLocation RParenLoc);

private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
                                   AtomicExpr::AtomicOp Op);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
                            llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
                                 int Low, int High);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
                                    unsigned Multiple);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
                              int ArgNum, unsigned ExpectedFieldNum,
                              bool AllowName);

public:
// Kinds of format strings recognized by format-string checking.
enum FormatStringType {
  FST_Scanf,
  FST_Printf,
  FST_NSString,
  FST_Strftime,
  FST_Strfmon,
  FST_Kprintf,
  FST_FreeBSDKPrintf,
  FST_OSTrace,
  FST_OSLog,
  FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);

bool FormatStringHasSArg(const StringLiteral *FExpr);

static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);

private:
bool CheckFormatArguments(const FormatAttr *Format,
                          ArrayRef<const Expr *> Args,
                          bool IsCXXMember,
                          VariadicCallType CallType,
                          SourceLocation Loc, SourceRange Range,
                          llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
                          bool HasVAListArg, unsigned format_idx,
                          unsigned firstDataArg, FormatStringType Type,
                          VariadicCallType CallType,
                          SourceLocation Loc, SourceRange range,
                          llvm::SmallBitVector &CheckedVarArgs);

void CheckAbsoluteValueFunction(const CallExpr *Call,
                                const FunctionDecl *FDecl);
void
CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);

  void CheckMemaccessArguments(const CallExpr *Call,
                               unsigned BId,
                               IdentifierInfo *FnName);

  void CheckStrlcpycatArguments(const CallExpr *Call,
                                IdentifierInfo *FnName);

  void CheckStrncatArguments(const CallExpr *Call,
                             IdentifierInfo *FnName);

  void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
                          SourceLocation ReturnLoc,
                          bool isObjCMethod = false,
                          const AttrVec *Attrs = nullptr,
                          const FunctionDecl *FD = nullptr);

  void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS);

  void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
  void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
  void CheckForIntOverflow(Expr *E);
  void CheckUnsequencedOperations(Expr *E);

  /// \brief Perform semantic checks on a completed expression. This will either
  /// be a full-expression or a default argument expression.
  void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
                          bool IsConstexpr = false);

  void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
                                   Expr *Init);

  /// Check if there is a field shadowing.
  void CheckShadowInheritedFields(const SourceLocation &Loc,
                                  DeclarationName FieldName,
                                  const CXXRecordDecl *RD);

  /// \brief Check if the given expression contains 'break' or 'continue'
  /// statement that produces control flow different from GCC.
  void CheckBreakContinueBinding(Expr *E);

  /// \brief Check whether receiver is mutable ObjC container which
  /// attempts to add itself into the container
  void CheckObjCCircularContainer(ObjCMessageExpr *Message);

  void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
  void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
                                 bool DeleteWasArrayForm);

public:
  /// \brief Register a magic integral constant to be used as a type tag.
  void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                  uint64_t MagicValue, QualType Type,
                                  bool LayoutCompatible, bool MustBeNull);

  /// Type information recorded for a registered type-tag magic value.
  struct TypeTagData {
    TypeTagData() {}

    TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull)
        : Type(Type), LayoutCompatible(LayoutCompatible),
          MustBeNull(MustBeNull) {}

    QualType Type;

    /// If true, \c Type should be compared with other expression's types for
    /// layout-compatibility.
    unsigned LayoutCompatible : 1;
    unsigned MustBeNull : 1;
  };

  /// A pair of ArgumentKind identifier and magic value. This uniquely
  /// identifies the magic value.
  typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;

private:
  /// \brief A map from magic value to type information.
  std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
      TypeTagForDatatypeMagicValues;

  /// \brief Perform checks on a call of a function with argument_with_type_tag
  /// or pointer_with_type_tag attributes.
  void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                                const Expr * const *ExprArgs);

  /// \brief Check if we are taking the address of a packed field
  /// as this may be a problem if the pointer value is dereferenced.
  void CheckAddressOfPackedMember(Expr *rhs);

  /// \brief The parser's current scope.
  ///
  /// The parser maintains this state here.
  Scope *CurScope;

  // Lazily-computed identifiers; mutable so const accessors can cache them.
  mutable IdentifierInfo *Ident_super;
  mutable IdentifierInfo *Ident___float128;

  /// Nullability type specifiers.
  IdentifierInfo *Ident__Nonnull = nullptr;
  IdentifierInfo *Ident__Nullable = nullptr;
  IdentifierInfo *Ident__Null_unspecified = nullptr;

  IdentifierInfo *Ident_NSError = nullptr;

protected:
  friend class Parser;
  friend class InitializationSequence;
  friend class ASTReader;
  friend class ASTDeclReader;
  friend class ASTWriter;

public:
  /// Retrieve the keyword associated with the given nullability kind.
  IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);

  /// The struct behind the CFErrorRef pointer.
  RecordDecl *CFError = nullptr;

  /// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();

  /// \brief Retrieve the parser's current scope.
  ///
  /// This routine must only be used when it is certain that semantic analysis
  /// and the parser are in precisely the same context, which is not the case
  /// when, e.g., we are performing any kind of template instantiation.
  /// Therefore, the only safe places to use this scope are in the parser
  /// itself and in routines directly invoked from the parser and *never* from
  /// template substitution or instantiation.
  Scope *getCurScope() const { return CurScope; }

  void incrementMSManglingNumber() const {
    return CurScope->incrementMSManglingNumber();
  }

  IdentifierInfo *getSuperIdentifier() const;
  IdentifierInfo *getFloat128Identifier() const;

  Decl *getObjCDeclContext() const;

  DeclContext *getCurLexicalContext() const {
    return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
  }

  const DeclContext *getCurObjCLexicalContext() const {
    const DeclContext *DC = getCurLexicalContext();
    // A category implicitly has the attribute of the interface.
    if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
      DC = CatD->getClassInterface();
    return DC;
  }

  /// \brief To be used for checking whether the arguments being passed to
  /// function exceeds the number of parameters expected for it.
  static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                               bool PartialOverloading = false) {
    // We check whether we're just after a comma in code-completion.
    if (NumArgs > 0 && PartialOverloading)
      return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
    return NumArgs > NumParams;
  }

  // Emitting members of dllexported classes is delayed until the class
  // (including field initializers) is fully parsed.
  SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;

private:
  /// \brief Helper class that collects misaligned member designations and
  /// their location info for delayed diagnostics.
  struct MisalignedMember {
    Expr *E;              // The expression designating the member access.
    RecordDecl *RD;       // The packed record the member belongs to.
    ValueDecl *MD;        // The misaligned member itself.
    CharUnits Alignment;  // The alignment the access actually has.

    MisalignedMember() : E(), RD(), MD(), Alignment() {}
    MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
                     CharUnits Alignment)
        : E(E), RD(RD), MD(MD), Alignment(Alignment) {}
    explicit MisalignedMember(Expr *E)
        : MisalignedMember(E, nullptr, nullptr, CharUnits()) {}

    // Two entries are considered the same access if they wrap the same Expr.
    bool operator==(const MisalignedMember &m) { return this->E == m.E; }
  };

  /// \brief Small set of gathered accesses to potentially misaligned members
  /// due to the packed attribute.
  SmallVector<MisalignedMember, 4> MisalignedMembers;

  /// \brief Adds an expression to the set of gathered misaligned members.
  void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
                                     CharUnits Alignment);

public:
  /// \brief Diagnoses the current set of gathered accesses. This typically
  /// happens at full expression level. The set is cleared after emitting the
  /// diagnostics.
  void DiagnoseMisalignedMembers();

  /// \brief This function checks if the expression is in the set of potentially
  /// misaligned members and it is converted to some pointer type T with lower
  /// or equal alignment requirements. If so it removes it. This is used when
  /// we do not want to diagnose such misaligned access (e.g. in conversions to
  /// void*).
  void DiscardMisalignedMemberAddress(const Type *T, Expr *E);

  /// \brief This function calls Action when it determines that E designates a
  /// misaligned member due to the packed attribute. This is used to emit
  /// local diagnostics like in reference binding.
  void RefersToMemberWithReducedAlignment(
      Expr *E,
      llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
          Action);
};

/// \brief RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
  Sema &Actions;
  bool Entered = true;  // Whether a context was pushed (and must be popped).

public:
  // Push a new evaluation context, optionally associated with a lambda
  // context declaration. If ShouldEnter is false this is a no-op RAII shell.
  EnterExpressionEvaluationContext(Sema &Actions,
                                   Sema::ExpressionEvaluationContext NewContext,
                                   Decl *LambdaContextDecl = nullptr,
                                   bool IsDecltype = false,
                                   bool ShouldEnter = true)
      : Actions(Actions), Entered(ShouldEnter) {
    if (Entered)
      Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                              IsDecltype);
  }
  // Push a new evaluation context that reuses the enclosing lambda context
  // declaration (tag-dispatched via Sema::ReuseLambdaContextDecl_t).
  EnterExpressionEvaluationContext(Sema &Actions,
                                   Sema::ExpressionEvaluationContext NewContext,
                                   Sema::ReuseLambdaContextDecl_t,
                                   bool IsDecltype = false)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(NewContext,
                                            Sema::ReuseLambdaContextDecl,
                                            IsDecltype);
  }

  enum InitListTag { InitList };
  // Entering an init-list only pushes a context when inside an unevaluated
  // operand in C++11 or later (see comment below); otherwise nothing happens.
  EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
                                   bool ShouldEnter = true)
      : Actions(Actions), Entered(false) {
    // In C++11 onwards, narrowing checks are performed on the contents of
    // braced-init-lists, even when they occur within unevaluated operands.
    // Therefore we still need to instantiate constexpr functions used in such
    // a context.
    if (ShouldEnter && Actions.isUnevaluatedContext() &&
        Actions.getLangOpts().CPlusPlus11) {
      Actions.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::UnevaluatedList, nullptr, false);
      Entered = true;
    }
  }

  ~EnterExpressionEvaluationContext() {
    if (Entered)
      Actions.PopExpressionEvaluationContext();
  }
};

DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
                         sema::TemplateDeductionInfo &Info);

/// \brief Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  CachedTokens Toks;
  /// \brief The template function declaration to be late parsed.
  Decl *D;
};

} // end namespace clang

namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
  using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
  using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;

  static FunctionDeclAndLoc getEmptyKey() {
    return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
  }

  static FunctionDeclAndLoc getTombstoneKey() {
    return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
  }

  static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
    return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
                        FDL.Loc.getRawEncoding());
  }

  static bool isEqual(const FunctionDeclAndLoc &LHS,
                      const FunctionDeclAndLoc &RHS) {
    return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
  }
};
} // namespace llvm

#endif
traversal.h
#ifndef traversal_h
#define traversal_h
#include "kernel.h"
#include "logger.h"
#include "thread.h"
#include "morton_key.h"

#if EXAFMM_COUNT_KERNEL
#define countKernel(N) N++
#else
#define countKernel(N)
#endif

namespace exafmm {
  //! Dual tree traversal: builds interaction lists and applies P2P/M2L kernels.
  class Traversal {
  private:
    const int nspawn;                                     //!< Threshold of NBODY for spawning new threads
    const int images;                                     //!< Number of periodic image sublevels
    int (* listOffset)[3];                                //!< Offset in interaction lists
    int (* lists)[3];                                     //!< Interaction lists
#if EXAFMM_COUNT_KERNEL
    real_t numP2P;                                        //!< Number of P2P kernel calls
    real_t numM2L;                                        //!< Number of M2L kernel calls
#endif
    C_iter Ci0;                                           //!< Iterator of first target cell
    C_iter Cj0;                                           //!< Iterator of first source cell

  private:
#if EXAFMM_COUNT_LIST
    std::vector<std::vector<int> > remoteInteractionList; //!< Keep track of per cell interaction with remote cells
    int mpirank;                                          //!< The MPI rank
    int mpisize;                                          //!< The MPI size
    int remoteNumP2P;                                     //!< Number of remote P2P interactions
    int remoteNumM2L;                                     //!< Number of remote M2L interactions

    //! Accumulate interaction list size of cells
    //! Increments the per-cell P2P/M2L counters, and (for P2P) records per-rank
    //! body counts in remoteInteractionList; remote != 0 marks a remote interaction.
    void countList(C_iter Ci, C_iter Cj, bool mutual, bool isP2P, int remote = 0) {
      if (isP2P) Ci->numP2P++;                            // If P2P, increment P2P counter of target cell
      else Ci->numM2L++;                                  // Else, increment M2L counter of target cell
      if (mutual) {                                       // If mutual interaction is on
        if (isP2P) Cj->numP2P++;                          // If P2P, increment P2P counter of source cell
        else Cj->numM2L++;                                // Else, increment M2L counter of source cell
      }                                                   // End if for mutual interaction
      if(mpisize > 0 && isP2P) {                          // Ignore if mpisize was not initialized in the constructor
        // Locate Ci's index among its parent's children by matching IBODY.
        int ichild = (Ci0 + Ci->IPARENT)->ICHILD;
        int nchild = (Ci0 + Ci->IPARENT)->NCHILD;
        C_iter cc = Ci0 + ichild;
        int i = 0;
        for(; i < nchild; ++i) {
          if((cc+i)->IBODY == Ci->IBODY) break;
        }
        ichild+=i;
        //int nbody = (Ci0 +ichild)->NBODY;
        if(!remote) {
          remoteInteractionList[ichild][mpirank] = Ci->NBODY;
        } else {
          // Attribute the source bodies to the rank that owns them.
          if(Cj->NBODY > 0)
            remoteInteractionList[ichild][Cj->BODY->IRANK] += Cj->NBODY;
        }
      }
      if(remote) {
        if(isP2P) remoteNumP2P++;
        else remoteNumM2L++;
      }
    }
#else
    //! No-op stub when list counting is compiled out
    void countList(C_iter, C_iter, bool, bool, int) {}
#endif

#if EXAFMM_USE_WEIGHT
    //! Accumulate interaction weights of cells
    void countWeight(C_iter Ci, C_iter Cj, bool mutual, real_t weight) {
      Ci->WEIGHT += weight;                               // Increment weight of target cell
      if (mutual) Cj->WEIGHT += weight;                   // Increment weight of source cell
    }
#else
    //! No-op stub when weight counting is compiled out
    void countWeight(C_iter, C_iter, bool, real_t) {}
#endif

    //! Get 3-D index from periodic key (key in [0,27) encodes a 3x3x3 image offset)
    ivec3 getPeriodicIndex(int key) {
      ivec3 iX;                                           // Initialize 3-D periodic index
      iX[0] = key % 3;                                    // x periodic index
      iX[1] = (key / 3) % 3;                              // y periodic index
      iX[2] = key / 9;                                    // z periodic index
      iX -= 1;                                            // {0,1,2} -> {-1,0,1}
      return iX;                                          // Return 3-D periodic index
    }

    //!
//! Get interaction list
    //! Reads the linked list of type itype for icell into list[]/periodicKey[];
    //! itype: 0 = P2P, 1 = M2L, 2 = neighbor candidates.
    void getList(int itype, int icell, int * list, int * periodicKey, int & numList) {
      int ilast = listOffset[icell][itype];               // Initialize list pointer
      numList = 0;                                        // Initialize list size
      while (ilast >= 0) {                                // While pointer exists
        if (lists[ilast][1] >= 0) {                       // If pointer is valid
          list[numList] = lists[ilast][1];                // Load interaction list
          periodicKey[numList] = lists[ilast][2];         // Load periodic key
          numList++;                                      // Increment list size
        }                                                 // End if for valid pointer
        ilast = lists[ilast][0];                          // Increment pointer
      }                                                   // End while loop for pointer existence
    }

    //! Set one interaction list
    //! Prepends (list, periodicKey) to icell's linked list of type itype.
    void setList(int itype, int icell, int list, int periodicKey, int & numLists) {
      lists[numLists][0] = listOffset[icell][itype];      // Store list pointer
      lists[numLists][1] = list;                          // Store list
      lists[numLists][2] = periodicKey;                   // Store periodicKey
      listOffset[icell][itype] = numLists;                // Store list size
      numLists++;                                         // Increment list size
    }

    //! Set all interaction lists
    //! Builds neighbor (type 2), M2L (type 1) and leaf-leaf P2P (type 0) lists
    //! for every cell by descending the tree level by level.
    void setLists(Cells & icells) {
      int numCells = icells.size();                       // Number of cells
      int childs[216], neighbors[27];                     // Array of parents' neighbors' children and neighbors
      int childKeys[216], neighborKeys[27];               // Periodic keys
      for (int i = 0; i < numCells; i++) {                // Loop over number of cells
        for (int j = 0; j < 3; j++) {                     // Loop over list types
          listOffset[i][j] = -1;                          // Set initial value to -1
        }                                                 // End loop over list types
      }                                                   // End loop over number of cells
      int numLists = 0;                                   // Initialize number of lists
      if (images == 0) {                                  // If non-periodic boundary condition
        setList(2, 0, 0, 13, numLists);                   // Push root cell into list (13 = center image)
      } else {                                            // If periodic boundary condition
        for (int i = 0; i < 27; i++) {                    // Loop over periodic images
          setList(2, 0, 0, i, numLists);                  // Push root cell into list
        }                                                 // End loop over periodic images
      }                                                   // End if for periodic boundary condition
      for (int icell = 1; icell < numCells; icell++) {    // Loop over target cells
        C_iter Ci = Ci0 + icell;                          // Iterator of current target cell
        int iparent = Ci->IPARENT;                        // Index of parent target cell
        int numNeighbors;                                 // Number of neighbor parents
        getList(2, iparent, neighbors, neighborKeys, numNeighbors); // Get list of neighbors
        ivec3 iX = morton::getIndex(Ci->ICELL);           // Get 3-D index from key
        int nchilds = 0;                                  // Initialize number of parents' neighbors' children
        for (int i = 0; i < numNeighbors; i++) {          // Loop over parents' neighbors
          int jparent = neighbors[i];                     // Index of parent source cell
          int parentKey = neighborKeys[i];                // Periodic key of parent source cell
          C_iter Cj = Cj0 + jparent;                      // Iterator of parent source cell
          for (int j = 0; j < Cj->NCHILD; j++) {          // Loop over children of parents' neighbors
            int jcell = Cj->ICHILD + j;                   // Index of source cell
            childs[nchilds] = jcell;                      // Store index of source cell
            childKeys[nchilds] = parentKey;               // Store periodic key of source cell
            nchilds++;                                    // Increment number of parents' neighbors' children
          }                                               // End loop over children of parents' neighbors
        }                                                 // End loop over parents' neighbors
        for (int i = 0; i < nchilds; i++) {               // Loop over children of parents' neighbors
          int jcell = childs[i];                          // Index of source cell
          int periodicKey = childKeys[i];                 // Periodic key of source cell
          C_iter Cj = Cj0 + jcell;                        // Iterator of source cell
          ivec3 jX = morton::getIndex(Cj->ICELL);         // 3-D index of source cell
          ivec3 pX = getPeriodicIndex(periodicKey);       // 3-D periodic index of source cell
          int level = morton::getLevel(Cj->ICELL);        // Level of source cell
          jX += pX * (1 << level);                        // Periodic image shift
          if (iX[0] - 1 <= jX[0] && jX[0] <= iX[0] + 1 && // If neighbor in x dimension and
              iX[1] - 1 <= jX[1] && jX[1] <= iX[1] + 1 && // in y dimension and
              iX[2] - 1 <= jX[2] && jX[2] <= iX[2] + 1) { // in z dimension
            setList(2, icell, jcell, periodicKey, numLists); // Store neighbor list (not P2P unless leaf)
          } else {                                        // If non-neighbor
            setList(1, icell, jcell, periodicKey, numLists); // Store M2L list
          }                                               // End if for non-neighbor
        }                                                 // End loop over children of parents' neighbors
      }                                                   // End loop over target cells
      for (int icell = 0; icell < numCells; icell++) {    // Loop over target cells
        C_iter Ci = Ci0 + icell;                          // Iterator of target cell
        if (Ci->NCHILD == 0) {                            // If target cell is leaf
          int numNeighbors;                               // Number of neighbors
          getList(2, icell, neighbors, neighborKeys, numNeighbors); // Get list of neighbor cells
          for (int j = 0; j < numNeighbors; j++) {        // Loop over neighbor cells
            int jcell = neighbors[j];                     // Index of source cell
            int periodicKey = neighborKeys[j];            // Periodic key of source cell
            C_iter Cj = Cj0 + jcell;                      // Iterator of source cell
            if (Cj->NCHILD == 0) {                        // If source cell is leaf
              setList(0, icell, jcell, periodicKey, numLists); // Store P2P list
            }                                             // End if for source cell leaf
          }                                               // End loop over neighbor cells
        }                                                 // End if for target cell leaf
      }                                                   // End loop over target cells
    }

    //! Split cell and call traverse() recursively for child
    //! Always splits the larger (or non-leaf) cell of the pair; spawns a
    //! TraverseRange task when the pair is still large or is a self pair.
    void splitCell(C_iter Ci, C_iter Cj, bool mutual, real_t remote) {
      if (Cj->NCHILD == 0) {                              // If Cj is leaf
        assert(Ci->NCHILD > 0);                           // Make sure Ci is not leaf
        for (C_iter ci = Ci0 + Ci->ICHILD; ci != Ci0 + Ci->ICHILD + Ci->NCHILD; ci++) { // Loop over Ci's children
          dualTreeTraversal(ci, Cj, mutual, remote);      // Traverse a single pair of cells
        }                                                 // End loop over Ci's children
      } else if (Ci->NCHILD == 0) {                       // Else if Ci is leaf
        assert(Cj->NCHILD > 0);                           // Make sure Cj is not leaf
        for (C_iter cj = Cj0 + Cj->ICHILD; cj != Cj0 + Cj->ICHILD + Cj->NCHILD; cj++) { // Loop over Cj's children
          dualTreeTraversal(Ci, cj, mutual, remote);      // Traverse a single pair of cells
        }                                                 // End loop over Cj's children
      } else if (Ci->NBODY + Cj->NBODY >= nspawn || (mutual && Ci == Cj)) { // Else if cells are still large
        TraverseRange traverseRange(this, Ci0 + Ci->ICHILD, Ci0 + Ci->ICHILD + Ci->NCHILD, // Instantiate recursive functor
                                    Cj0 + Cj->ICHILD, Cj0 + Cj->ICHILD + Cj->NCHILD,
                                    mutual, remote);
        traverseRange();                                  // Traverse for range of cell pairs
      } else if (Ci->R >= Cj->R) {                        // Else if Ci is larger than Cj
        for (C_iter ci = Ci0 + Ci->ICHILD; ci != Ci0 + Ci->ICHILD + Ci->NCHILD; ci++) { // Loop over Ci's children
          dualTreeTraversal(ci, Cj, mutual, remote);      // Traverse a single pair of cells
        }                                                 // End loop over Ci's children
      } else {                                            // Else if Cj is larger than Ci
        for (C_iter cj = Cj0 + Cj->ICHILD; cj != Cj0 + Cj->ICHILD + Cj->NCHILD; cj++) { // Loop over Cj's children
          dualTreeTraversal(Ci, cj, mutual, remote);      // Traverse a single pair of cells
        }                                                 // End loop over Cj's children
      }                                                   // End if for leafs and Ci Cj size
    }

    //! Dual tree traversal for a single pair of cells
    //! Applies the multipole acceptance criterion: far pairs get M2L, leaf
    //! pairs get P2P, everything else is split recursively.
    void dualTreeTraversal(C_iter Ci, C_iter Cj, bool mutual, real_t remote) {
      vec3 dX = Ci->X - Cj->X - kernel::Xperiodic;        // Distance vector from source to target
      real_t R2 = norm(dX);                               // Scalar distance squared
#if EXAFMM_NO_M2L
      if (Ci->NCHILD == 0 && Cj->NCHILD == 0) {           // If both cells are leafs
        if (R2 == 0 && Ci == Cj) {                        // If source and target are same
          kernel::P2P(Ci);                                // P2P kernel for single cell
        } else {                                          // Else if source and target are different
          kernel::P2P(Ci, Cj, mutual);                    // P2P kernel for pair of cells
        }                                                 // End if for same source and target
        countKernel(numP2P);                              // Increment P2P counter
        countList(Ci, Cj, mutual, true, remote);          // Increment P2P list
        countWeight(Ci, Cj, mutual, remote);              // Increment P2P weight
      } else {                                            // Else if cells are close but not bodies
        splitCell(Ci, Cj, mutual, remote);                // Split cell and call function recursively for child
      }                                                   // End if for multipole acceptance
#else
      if (R2 > (Ci->R + Cj->R) * (Ci->R + Cj->R) * (1 - 1e-3)) { // If distance is far enough
        kernel::M2L(Ci, Cj, mutual);                      // M2L kernel
        countKernel(numM2L);                              // Increment M2L counter
        countList(Ci, Cj, mutual, false, remote);         // Increment M2L list
        countWeight(Ci, Cj, mutual, remote);              // Increment M2L weight
      } else if (Ci->NCHILD == 0 && Cj->NCHILD == 0) {    // Else if both cells are bodies
#if EXAFMM_NO_P2P
        // Decode the interleaved Morton key of Ci into a 3-D index iX.
        int index = Ci->ICELL;
        int iX[3] = {0, 0, 0};
        int d = 0, level = 0;
        while ( index != 0 ) {
          iX[d] += (index % 2) * (1 << level);
          index >>= 1;
          d = (d + 1) % 3;
          if ( d == 0 ) level++;
        }
        // Decode the Morton key of Cj into jX the same way.
        index = Cj->ICELL;
        int jX[3] = {0, 0, 0};
        d = 0; level = 0;
        while ( index != 0 ) {
          jX[d] += (index % 2) * (1 << level);
          index >>= 1;
          d = (d + 1) % 3;
          if ( d == 0 ) level++;
        }
        // Shift jX by the periodic image offset and test 3-D adjacency.
        int isNeighbor = 1;
        for (d = 0; d < 3; d++) {
          if (kernel::Xperiodic[d] > 1e-3) jX[d] += 5;
          if (kernel::Xperiodic[d] < -1e-3) jX[d] -= 5;
          isNeighbor &= abs(iX[d] - jX[d]) <= 1;
        }
#endif
        if (Cj->NBODY == 0) {                             // If the bodies weren't sent from remote node
          std::cout << "Warning: icell " << Ci->ICELL << " needs bodies from jcell" << Cj->ICELL << std::endl;
          kernel::M2L(Ci, Cj, mutual);                    // M2L kernel
          countKernel(numM2L);                            // Increment M2L counter
          countList(Ci, Cj, mutual, false, remote);       // Increment M2L list
          countWeight(Ci, Cj, mutual, remote);            // Increment M2L weight
#if EXAFMM_NO_P2P
        } else if (!isNeighbor) {                         // If GROMACS handles neighbors
          kernel::M2L(Ci, Cj, mutual);                    // M2L kernel
          countKernel(numM2L);                            // Increment M2L counter
          countList(Ci, Cj, mutual, false, remote);       // Increment M2L list
          countWeight(Ci, Cj, mutual, remote);            // Increment M2L weight
        } else {
          countList(Ci, Cj, mutual, true, remote);        // Increment P2P list
#else
        } else {
          if (R2 == 0 && Ci == Cj) {                      // If source and target are same
            kernel::P2P(Ci);                              // P2P kernel for single cell
          } else {                                        // Else if source and target are different
            kernel::P2P(Ci, Cj, mutual);                  // P2P kernel for pair of cells
          }                                               // End if for same source and target
          countKernel(numP2P);                            // Increment P2P counter
          countList(Ci, Cj, mutual, true, remote);        // Increment P2P list
          countWeight(Ci, Cj, mutual, remote);            // Increment P2P weight
#endif
        }                                                 // End if for bodies
      } else {                                            // Else if cells are close but not bodies
        splitCell(Ci, Cj, mutual, remote);                // Split cell and call function recursively for child
      }                                                   // End if for multipole acceptance
#endif
    }

    //!
//! Recursive functor for dual tree traversal of a range of Ci and Cj
    //! Splits both ranges in half and recurses task-parallel; diagonal halves
    //! run first so mutual/self interactions never race.
    struct TraverseRange {
      Traversal * traversal;                              //!< Traversal object
      C_iter CiBegin;                                     //!< Begin iterator of target cells
      C_iter CiEnd;                                       //!< End iterator of target cells
      C_iter CjBegin;                                     //!< Begin Iterator of source cells
      C_iter CjEnd;                                       //!< End iterator of source cells
      bool mutual;                                        //!< Flag for mutual interaction
      real_t remote;                                      //!< Weight for remote work load
      TraverseRange(Traversal * _traversal, C_iter _CiBegin, C_iter _CiEnd, // Constructor
                    C_iter _CjBegin, C_iter _CjEnd, bool _mutual, real_t _remote) :
        traversal(_traversal), CiBegin(_CiBegin), CiEnd(_CiEnd), // Initialize variables
        CjBegin(_CjBegin), CjEnd(_CjEnd), mutual(_mutual), remote(_remote) {}
      void operator() () {                                // Overload operator()
        Tracer tracer;                                    // Instantiate tracer
        logger::startTracer(tracer);                      // Start tracer
        if (CiEnd - CiBegin == 1 || CjEnd - CjBegin == 1) { // If only one cell in range
          if (CiBegin == CjBegin) {                       // If Ci == Cj
            assert(CiEnd == CjEnd);                       // Check if mutual & self interaction
            traversal->dualTreeTraversal(CiBegin, CjBegin, mutual, remote); // Call traverse for single pair
          } else {                                        // If Ci != Cj
            for (C_iter Ci = CiBegin; Ci != CiEnd; Ci++) { // Loop over all Ci cells
              for (C_iter Cj = CjBegin; Cj != CjEnd; Cj++) { // Loop over all Cj cells
                traversal->dualTreeTraversal(Ci, Cj, mutual, remote); // Call traverse for single pair
              }                                           // End loop over all Cj cells
            }                                             // End loop over all Ci cells
          }                                               // End if for Ci == Cj
        } else {                                          // If many cells are in the range
          C_iter CiMid = CiBegin + (CiEnd - CiBegin) / 2; // Split range of Ci cells in half
          C_iter CjMid = CjBegin + (CjEnd - CjBegin) / 2; // Split range of Cj cells in half
          mk_task_group;                                  // Initialize task group
          {
            TraverseRange leftBranch(traversal, CiBegin, CiMid, // Instantiate recursive functor
                                     CjBegin, CjMid, mutual, remote);
            create_taskc(leftBranch);                     // Ci:former Cj:former
            TraverseRange rightBranch(traversal, CiMid, CiEnd, // Instantiate recursive functor
                                      CjMid, CjEnd, mutual, remote);
            rightBranch();                                // Ci:latter Cj:latter
            wait_tasks;                                   // Synchronize task group
          }
          {
            TraverseRange leftBranch(traversal, CiBegin, CiMid, // Instantiate recursive functor
                                     CjMid, CjEnd, mutual, remote);
            create_taskc(leftBranch);                     // Ci:former Cj:latter
            if (!mutual || CiBegin != CjBegin) {          // Exclude mutual & self interaction
              TraverseRange rightBranch(traversal, CiMid, CiEnd, // Instantiate recursive functor
                                        CjBegin, CjMid, mutual, remote);
              rightBranch();                              // Ci:latter Cj:former
            } else {                                      // If mutual or self interaction
              assert(CiEnd == CjEnd);                     // Check if mutual & self interaction
            }                                             // End if for mutual & self interaction
            wait_tasks;                                   // Synchronize task group
          }
        }                                                 // End if for many cells in range
        logger::stopTracer(tracer);                       // Stop tracer
      }                                                   // End overload operator()
    };

#if EXAFMM_WITH_TBB || EXAFMM_WITH_MTHREAD
    //! TBB/MassiveThreads loop body: M2L over each cell's precomputed list
    class ExecListBasedM2L {
    private:
      C_iter Ci0;                                         //!< Iterator of first target cell
      C_iter Cj0;                                         //!< Iterator of first source cell
      vec3 cycle;                                         //!< Periodic cycle lengths
      bool mutual;                                        //!< Flag for mutual interaction
      real_t remote;                                      //!< Weight for remote work load
      Traversal* traversal;                               //!< Traversal object
    public:
      ExecListBasedM2L(C_iter _ci0, C_iter _cj0, vec3 _cycle, bool _mutual,
                       real_t _remote, Traversal* _traversal) :
        Ci0(_ci0), Cj0(_cj0), cycle(_cycle), mutual(_mutual),
        remote(_remote), traversal(_traversal) {}
      void operator()(tbb::blocked_range<int> r) const {
        int list[189], periodicKeys[189];                 // Current interaction list
        for (int icell = r.begin(); icell < r.end(); icell++) { // Loop over target cells
          C_iter Ci = Ci0 + icell;                        // Iterator of target cell
          int nlist;                                      // Interaction list size
          traversal->getList(1, icell, list, periodicKeys, nlist); // Get M2L interaction list
          for (int ilist = 0; ilist < nlist; ilist++) {   // Loop over M2L interaction list
            int jcell = list[ilist];                      // Index of source cell
            int periodicKey = periodicKeys[ilist];        // Periodic key
            C_iter Cj = Cj0 + jcell;                      // Iterator of source cell
            ivec3 pX = traversal->getPeriodicIndex(periodicKey); // 3-D periodic index of source cell
            for (int d = 0; d < 3; d++) {                 // Loop over dimensions
              kernel::Xperiodic[d] = pX[d] * cycle[d];    // Periodic coordinate offset
            }                                             // End loop over dimensions
            kernel::M2L(Ci, Cj, mutual);                  // M2L kernel
            //countKernel(numM2L);                        // Increment M2L counter
            traversal->countList(Ci, Cj, mutual, false, remote); // Increment M2L list
            traversal->countWeight(Ci, Cj, mutual, remote); // Increment M2L weight
          }                                               // End loop over M2L interaction list
        }                                                 // End loop over target cells
      }
    };

    //! TBB/MassiveThreads loop body: P2P over each leaf cell's precomputed list
    class ExecListBasedP2P {
    private:
      C_iter Ci0;                                         //!< Iterator of first target cell
      C_iter Cj0;                                         //!< Iterator of first source cell
      vec3 cycle;                                         //!< Periodic cycle lengths
      bool mutual;                                        //!< Flag for mutual interaction
      real_t remote;                                      //!< Weight for remote work load
      Traversal* traversal;                               //!< Traversal object
    public:
      ExecListBasedP2P(C_iter _ci0, C_iter _cj0, vec3 _cycle, bool _mutual,
                       real_t _remote, Traversal* _traversal) :
        Ci0(_ci0), Cj0(_cj0), cycle(_cycle), mutual(_mutual),
        remote(_remote), traversal(_traversal) {}
      void operator()(tbb::blocked_range<int> r) const {
        int list[189], periodicKeys[189];                 // Current interaction list
        for (int icell = r.begin(); icell < r.end(); icell++) { // Loop over target cells
          C_iter Ci = Ci0 + icell;                        // Iterator of target cell
          if (Ci->NCHILD == 0) {                          // If target cell is leaf
            int nlist;                                    // Interaction list size
            traversal->getList(0, icell, list, periodicKeys, nlist); // Get P2P interaction list
            for (int ilist = 0; ilist < nlist; ilist++) { // Loop over P2P interaction list
              int jcell = list[ilist];                    // Index of source cell
              int periodicKey = periodicKeys[ilist];      // Periodic key
              C_iter Cj = Cj0 + jcell;                    // Iterator of source cell
              ivec3 pX = traversal->getPeriodicIndex(periodicKey); // 3-D periodic index of source cell
              for (int d = 0; d < 3; d++) {               // Loop over dimensions
                kernel::Xperiodic[d] = pX[d] * cycle[d];  // Periodic coordinate offset
              }                                           // End loop over dimensions
              kernel::P2P(Ci, Cj, mutual);                // P2P kernel
              //countKernel(numP2P);                      // Increment P2P counter
              traversal->countList(Ci, Cj, mutual, true, remote); // Increment P2P list
              traversal->countWeight(Ci, Cj, mutual, remote); // Increment P2P weight
            }                                             // End loop over P2P interaction list
          }                                               // End if for target cell leaf
        }                                                 // End loop over target cells
      }
    };
#endif

    //!
List based traversal void listBasedTraversal(int numCells, vec3 cycle, bool mutual, real_t remote) { #if EXAFMM_WITH_TBB || EXAFMM_WITH_MTHREAD tbb::blocked_range<int> b (0,numCells); ExecListBasedM2L m2l(Ci0, Cj0, cycle, mutual, remote, this); ExecListBasedP2P p2p(Ci0, Cj0, cycle, mutual, remote, this); #if EXAFMM_WITH_MTHREAD mtbb::parallel_for(b, m2l); mtbb::parallel_for(b, p2p); #else tbb::parallel_for(b, m2l); tbb::parallel_for(b, p2p); #endif #else #if !EXAFMM_WIT_TBB && !EXAFMM_WITH_CILK int list[189], periodicKeys[189]; // Current interaction list #endif #if EXAFMM_WITH_CILK cilk_for (int icell = 0; icell < numCells; icell++) { // Loop over target cells int list[189], periodicKeys[189]; // Current interaction list #else #pragma omp parallel for private(list, periodicKeys) schedule(dynamic) for (int icell = 0; icell < numCells; icell++) { // Loop over target cells #endif C_iter Ci = Ci0 + icell; // Iterator of target cell int nlist; // Interaction list size getList(1, icell, list, periodicKeys, nlist); // Get M2L interaction list for (int ilist = 0; ilist < nlist; ilist++) { // Loop over M2L interaction list int jcell = list[ilist]; // Index of source cell int periodicKey = periodicKeys[ilist]; // Periodic key C_iter Cj = Cj0 + jcell; // Iterator of source cell ivec3 pX = getPeriodicIndex(periodicKey); // 3-D periodic index of source cell for (int d = 0; d < 3; d++) { // Loop over dimensions kernel::Xperiodic[d] = pX[d] * cycle[d]; // Periodic coordinate offset } // End loop over dimensions kernel::M2L(Ci, Cj, mutual); // M2L kernel //countKernel(numM2L); // Increment M2L counter countList(Ci, Cj, mutual, false, remote); // Increment M2L list countWeight(Ci, Cj, mutual, remote); // Increment M2L weight } // End loop over M2L interaction list } // End loop over target cells #ifndef EXAFMM_NO_P2P #if EXAFMM_WITH_CILK cilk_for (int icell = 0; icell < numCells; icell++) { // Loop over target cells int list[189], periodicKeys[189]; // Current interaction list 
#else #pragma omp parallel for private(list, periodicKeys) schedule(dynamic) for (int icell = 0; icell < numCells; icell++) { // Loop over target cells #endif C_iter Ci = Ci0 + icell; // Iterator of target cell if (Ci->NCHILD == 0) { // If target cell is leaf int nlist; // Interaction list size getList(0, icell, list, periodicKeys, nlist); // Get P2P interaction list for (int ilist = 0; ilist < nlist; ilist++) { // Loop over P2P interaction list int jcell = list[ilist]; // Index of source cell int periodicKey = periodicKeys[ilist]; // Periodic key C_iter Cj = Cj0 + jcell; // Iterator of source cell ivec3 pX = getPeriodicIndex(periodicKey); // 3-D periodic index of source cell for (int d = 0; d < 3; d++) { // Loop over dimensions kernel::Xperiodic[d] = pX[d] * cycle[d]; // Periodic coordinate offset } // End loop over dimensions kernel::P2P(Ci, Cj, mutual); // P2P kernel //countKernel(numP2P); // Increment P2P counter countList(Ci, Cj, mutual, true, remote); // Increment P2P list countWeight(Ci, Cj, mutual, remote); // Increment P2P weight } // End loop over P2P interaction list } // End if for target cell leaf } // End loop over target cells #endif #endif } //! 
Tree traversal of periodic cells
  //! Evaluate M2L between the real tree and its periodic images, one periodic
  //! sublevel per iteration, coarsening the cell size threefold each level.
  //! NOTE(review): this temporarily repoints the member Cj0 at the local pcells
  //! for the M2M call and restores it from C0 at the end of each level; pcells
  //! is function-scoped, so the borrowed iterator never escapes this call.
  void traversePeriodic(vec3 cycle) {
    logger::startTimer("Traverse periodic");                    // Start timer
    Cells pcells; pcells.resize(27);                            // Create 27 cells: 26 periodic neighbors + 1 parent
    C_iter Ci = pcells.end() - 1;                               // Last cell is periodic parent cell
    *Ci = *Cj0;                                                 // Copy values from source root
    Ci->ICHILD = 0;                                             // Child cells for periodic center cell
    Ci->NCHILD = 26;                                            // Number of child cells for periodic center cell
    C_iter C0 = Cj0;                                            // Save Cj0 so it can be restored per level
    for (int level = 0; level < images - 1; level++) {          // Loop over sublevels of tree
      for (int ix = -1; ix <= 1; ix++) {                        // Loop over x periodic direction
        for (int iy = -1; iy <= 1; iy++) {                      // Loop over y periodic direction
          for (int iz = -1; iz <= 1; iz++) {                    // Loop over z periodic direction
            if (ix != 0 || iy != 0 || iz != 0) {                // If periodic cell is not at center
              for (int cx = -1; cx <= 1; cx++) {                // Loop over x periodic direction (child)
                for (int cy = -1; cy <= 1; cy++) {              // Loop over y periodic direction (child)
                  for (int cz = -1; cz <= 1; cz++) {            // Loop over z periodic direction (child)
                    kernel::Xperiodic[0] = (ix * 3 + cx) * cycle[0];// Coordinate offset for x periodic direction
                    kernel::Xperiodic[1] = (iy * 3 + cy) * cycle[1];// Coordinate offset for y periodic direction
                    kernel::Xperiodic[2] = (iz * 3 + cz) * cycle[2];// Coordinate offset for z periodic direction
                    kernel::M2L(Ci0, Ci, false);                // M2L kernel (non-mutual)
                  }                                             // End loop over z periodic direction (child)
                }                                               // End loop over y periodic direction (child)
              }                                                 // End loop over x periodic direction (child)
            }                                                   // Endif for periodic center cell
          }                                                     // End loop over z periodic direction
        }                                                       // End loop over y periodic direction
      }                                                         // End loop over x periodic direction
#if EXAFMM_MASS
      for (int i = 1; i < NTERM; i++) Ci->M[i] *= Ci->M[0];     // Normalize multipole expansion coefficients
#endif
      Cj0 = pcells.begin();                                     // Redefine Cj0 for M2M
      C_iter Cj = Cj0;                                          // Iterator of periodic neighbor cells
      for (int ix = -1; ix <= 1; ix++) {                        // Loop over x periodic direction
        for (int iy = -1; iy <= 1; iy++) {                      // Loop over y periodic direction
          for (int iz = -1; iz <= 1; iz++) {                    // Loop over z periodic direction
            if (ix != 0 || iy != 0 || iz != 0) {                // If periodic cell is not at center
              Cj->X[0] = Ci->X[0] + ix * cycle[0];              // Set new x coordinate for periodic image
              Cj->X[1] = Ci->X[1] + iy * cycle[1];              // Set new y coordinate for periodic image
              Cj->X[2] = Ci->X[2] + iz * cycle[2];              // Set new z coordinate for periodic image
              Cj->M = Ci->M;                                    // Copy multipoles to new periodic image
              Cj++;                                             // Increment periodic cell iterator
            }                                                   // Endif for periodic center cell
          }                                                     // End loop over z periodic direction
        }                                                       // End loop over y periodic direction
      }                                                         // End loop over x periodic direction
      Ci->M = 0;                                                // Reset multipoles of periodic parent
      kernel::M2M(Ci, Cj0);                                     // Evaluate periodic M2M kernels for this sublevel
#if EXAFMM_MASS
      for (int i = 1; i < NTERM; i++) Ci->M[i] /= Ci->M[0];     // Normalize multipole expansion coefficients
#endif
      cycle *= 3;                                               // Increase center cell size three times
      Cj0 = C0;                                                 // Reset Cj0 back
    }                                                           // End loop over sublevels of tree
#if EXAFMM_MASS
    Ci0->L /= Ci0->M[0];                                        // Normalize local expansion coefficients
#endif
    logger::stopTimer("Traverse periodic");                     // Stop timer
  }

 public:
  //! Constructor: nspawn is the task-spawn threshold, images the number of
  //! periodic image levels (0 means free-space boundary conditions).
  Traversal(int _nspawn, int _images) :                         // Constructor
    nspawn(_nspawn), images(_images)                            // Initialize variables
#if EXAFMM_COUNT_KERNEL
    , numP2P(0), numM2L(0)
#endif
#if EXAFMM_COUNT_LIST
    , mpirank(0), mpisize(0), remoteNumM2L(0), remoteNumP2P(0)
#endif
  {}
#if EXAFMM_COUNT_LIST
  //! Constructor with MPI rank/size for remote interaction-list bookkeeping.
  Traversal(int _nspawn, int _images, int _mpirank, int _mpisize) :// Constructor
    nspawn(_nspawn), images(_images), mpirank(_mpirank), mpisize(_mpisize)
    , remoteNumM2L(0), remoteNumP2P(0)                          // Initialize variables
#else
  //! Same signature without EXAFMM_COUNT_LIST: rank/size are accepted and ignored
  //! so callers need not change with the configuration.
  Traversal(int _nspawn, int _images, int , int) :              // Constructor
    nspawn(_nspawn), images(_images)                            // Initialize variables
#endif
#if EXAFMM_COUNT_KERNEL
    , numP2P(0), numM2L(0)
#endif
  { }
#if EXAFMM_COUNT_LIST
  //! 
Initialize size of P2P and M2L interaction lists per cell void initListCount(Cells & cells) { for (C_iter C = cells.begin(); C != cells.end(); C++) { // Loop over cells C->numP2P = C->numM2L = 0; // Initialize size of interaction list } // End loop over cells if(mpisize>0) // Ignore if MPI size is not set in the constructor remoteInteractionList = std::vector<std::vector<int> >(cells.size(),std::vector<int>(mpisize,0)); remoteNumM2L = 0; remoteNumP2P = 0; } std::vector<std::vector<int> >& getRemoteInteractionList() { return remoteInteractionList; } int getRemoteP2PCount() { return remoteNumP2P; } int getRemoteM2LCount() { return remoteNumM2L; } #else void initListCount(Cells) {} #endif #if EXAFMM_USE_WEIGHT //! Initialize interaction weights of bodies and cells void initWeight(Cells & cells) { for (C_iter C = cells.begin(); C != cells.end(); C++) { // Loop over cells C->WEIGHT = 0; // Initialize cell weights if (C->NCHILD == 0) { // If leaf cell for (B_iter B = C->BODY; B != C->BODY + C->NBODY; B++) { // Loop over bodies in cell B->WEIGHT = 0; // Initialize body weights } // End loop over bodies in cell } // End if for leaf cell } // End loop over cells } #else void initWeight(Cells) {} #endif #if EXAFMM_COUNT_KERNEL void initCountKernel() { numP2P = 0; numM2L = 0; } #else void initCountKernel() {} #endif //! 
Evaluate P2P and M2L using list based traversal
  //! Top-level traversal entry point: either dual tree traversal or list-based
  //! traversal, with optional periodic images.
  //! \param icells target cells (Ci0 is set to icells.begin())
  //! \param jcells source cells (Cj0 is set to jcells.begin())
  //! \param cycle  periodic cell size per dimension
  //! \param dual   true = dual tree traversal, false = list based traversal
  //! \param mutual mutual interactions (forced off for the list-based path)
  //! \param remote weight attributed to remote work in the counters
  void traverse(Cells & icells, Cells & jcells, vec3 cycle, bool dual, bool mutual, real_t remote = 1) {
    if (icells.empty() || jcells.empty()) return;               // Quit if either of the cell vectors are empty
    logger::startTimer("Traverse");                             // Start timer
    logger::initTracer();                                       // Initialize tracer
    Ci0 = icells.begin();                                       // Iterator of first target cell
    Cj0 = jcells.begin();                                       // Iterator of first source cell
    kernel::Xperiodic = 0;                                      // Set periodic coordinate offset to 0
    if (dual) {                                                 // If dual tree traversal
      if (images == 0) {                                        // If non-periodic boundary condition
        dualTreeTraversal(Ci0, Cj0, mutual, remote);            // Traverse the tree
      } else {                                                  // If periodic boundary condition
        for (int ix = -1; ix <= 1; ix++) {                      // Loop over x periodic direction
          for (int iy = -1; iy <= 1; iy++) {                    // Loop over y periodic direction
            for (int iz = -1; iz <= 1; iz++) {                  // Loop over z periodic direction
              kernel::Xperiodic[0] = ix * cycle[0];             // Coordinate shift for x periodic direction
              kernel::Xperiodic[1] = iy * cycle[1];             // Coordinate shift for y periodic direction
              kernel::Xperiodic[2] = iz * cycle[2];             // Coordinate shift for z periodic direction
              dualTreeTraversal(Ci0, Cj0, false, remote);       // Traverse the tree for this periodic image (non-mutual)
            }                                                   // End loop over z periodic direction
          }                                                     // End loop over y periodic direction
        }                                                       // End loop over x periodic direction
        traversePeriodic(cycle);                                // Traverse tree for periodic images
      }                                                         // End if for periodic boundary condition
    } else {                                                    // If list based traversal
      int numCells = icells.size();                             // Number of cells
      mutual = false;                                           // Mutual interactions are not supported by the list path
      listOffset = new int [numCells][3]();                     // Offset of interaction lists (zero-initialized; freed below)
      lists = new int [(216 + 27)*numCells][3]();               // All interaction lists (216 M2L + 27 P2P slots per cell)
      setLists(icells);                                         // Set P2P and M2L interaction lists
      listBasedTraversal(numCells, cycle, mutual, remote);      // Traverse the tree
      if (images != 0) {                                        // If periodic boundary condition
        traversePeriodic(cycle);                                // Traverse tree for periodic images
      }                                                         // End if for periodic boundary condition
      delete[] listOffset;                                      // Deallocate offset of lists
      delete[] lists;                                           // Deallocate lists
    }                                                           // End if for dual tree traversal
    logger::stopTimer("Traverse");                              // Stop timer
    logger::writeTracer();                                      // Write tracer to file
  }

  //! Task-parallel recursion that splits the TARGET body range in half until it
  //! is below 25 bodies, then evaluates P2P directly; used by direct().
  struct DirectRecursion {
    C_iter Ci;                                                  //!< Iterator of target cell
    C_iter Cj;                                                  //!< Iterator of source cell
    DirectRecursion(C_iter _Ci, C_iter _Cj) :                   // Constructor
      Ci(_Ci), Cj(_Cj) {}                                       // Initialize variables
    void operator() () {                                        // Overload operator
      if (Ci->NBODY < 25) {                                     // If number of target bodies is less than threshold
        kernel::P2P(Ci, Cj, false);                             // Evaluate P2P kernel
      } else {                                                  // If number of target bodies is more than threshold
        Cells cells; cells.resize(1);                           // Initialize new cell vector
        C_iter Ci2 = cells.begin();                             // New cell iterator for right branch
        Ci2->BODY = Ci->BODY + Ci->NBODY / 2;                   // Set begin iterator to handle latter half
        Ci2->NBODY = Ci->NBODY - Ci->NBODY / 2;                 // Set range to handle latter half
        Ci->NBODY = Ci->NBODY / 2;                              // Set range to handle first half
        mk_task_group;                                          // Initialize task group
        DirectRecursion leftBranch(Ci, Cj);                     // Instantiate recursive functor
        create_taskc(leftBranch);                               // Create new task for left branch
        DirectRecursion rightBranch(Ci2, Cj);                   // Instantiate recursive functor
        rightBranch();                                          // Use old task for right branch
        wait_tasks;                                             // Synchronize task group before cells goes out of scope
      }                                                         // End if for NBODY threshold
    }                                                           // End operator
  };
  //! 
Direct summation void direct(Bodies & ibodies, Bodies & jbodies, vec3 cycle) { Cells cells; cells.resize(2); // Define a pair of cells to pass to P2P kernel C_iter Ci = cells.begin(), Cj = cells.begin() + 1; // First cell is target, second cell is source int prange = 0; // Range of periodic images for (int i = 0; i < images; i++) { // Loop over periodic image sublevels prange += int(std::pow(3., i)); // Accumulate range of periodic images } // End loop over perioidc image sublevels for (int ix = -prange; ix <= prange; ix++) { // Loop over x periodic direction for (int iy = -prange; iy <= prange; iy++) { // Loop over y periodic direction for (int iz = -prange; iz <= prange; iz++) { // Loop over z periodic direction kernel::Xperiodic[0] = ix * cycle[0]; // Coordinate shift for x periodic direction kernel::Xperiodic[1] = iy * cycle[1]; // Coordinate shift for y periodic direction kernel::Xperiodic[2] = iz * cycle[2]; // Coordinate shift for z periodic direction Ci->BODY = ibodies.begin(); // Iterator of first target body Ci->NBODY = ibodies.size(); // Number of target bodies Cj->BODY = jbodies.begin(); // Iterator of first source body Cj->NBODY = jbodies.size(); // Number of source bodies DirectRecursion directRecursion(Ci, Cj); // Instantiate recursive functor directRecursion(); // Recursive call for direct summation } // End loop over z periodic direction } // End loop over y periodic direction } // End loop over x periodic direction } //! Normalize bodies after direct summation void normalize(Bodies & bodies) { #if !EXAFMM_BIOTSAVART for (B_iter B = bodies.begin(); B != bodies.end(); B++) { // Loop over bodies B->TRG /= B->SRC; // Normalize by target charge } // End loop over bodies #endif } //! 
Write G matrix to file void writeMatrix(Bodies & bodies, Bodies & jbodies) { std::ofstream matrixFile("matrix.dat"); // Open matrix data file for (B_iter Bi = bodies.begin(); Bi != bodies.end(); Bi++) { // Loop over target bodies for (B_iter Bj = jbodies.begin(); Bj != jbodies.end(); Bj++) { // Loop over source bodies vec3 dX = Bi->X - Bj->X; // Distance vector real_t R2 = norm(dX) + kernel::eps2; // Distance squared real_t G = R2 == 0 ? 0.0 : 1.0 / sqrt(R2); // Green's function matrixFile << G << " "; // Write to matrix data file } // End loop over source bodies matrixFile << std::endl; // Line break } // End loop over target bodies } //! Print traversal statistics void printTraversalData() { #if EXAFMM_COUNT_KERNEL if (logger::verbose) { // If verbose flag is true std::cout << "--- Traversal stats --------------" << std::endl// Print title << std::setw(logger::stringLength) << std::left // Set format << "P2P calls" << " : " // Print title << std::setprecision(0) << std::fixed // Set format << numP2P << std::endl // Print number of P2P calls << std::setw(logger::stringLength) << std::left // Set format << "M2L calls" << " : " // Print title << std::setprecision(0) << std::fixed // Set format << numM2L << std::endl; // Print number of M2L calls } // End if for verbose flag #endif } void writeTraversalData(int mpirank, int numBodies, int iteration=0){ #if EXAFMM_COUNT_KERNEL || EXAFMM_COUNT_LIST std::stringstream name; // File name name << "num" << std::setfill('0') << std::setw(6) // Set format << mpirank << ".dat"; // Create file name for list std::ios_base::openmode mode = (iteration == 0)? 
std::ios_base::out:std::ios::app; std::ofstream listFile(name.str().c_str(),mode); // Open list log file #endif #if EXAFMM_COUNT_KERNEL && EXAFMM_COUNT_LIST listFile << std::setw(logger::stringLength) << std::left // Set format << "P2P calls" << " " << numP2P - remoteNumP2P << std::endl; // Print event and timer listFile << std::setw(logger::stringLength) << std::left // Set format << "M2L calls" << " " << numM2L - remoteNumM2L<< std::endl; // Print event and timer listFile << std::setw(logger::stringLength) << std::left // Set format << "Remote P2P calls" << " " << remoteNumM2L << std::endl; // Print event and timer listFile << std::setw(logger::stringLength) << std::left // Set format << "Remote M2L calls" << " " << remoteNumP2P << std::endl; // Print event and timer listFile << std::setw(logger::stringLength) << std::left // Set format << "Bodies" << " " << numBodies << std::endl; // Print event and timer #elif EXAFMM_COUNT_LIST listFile << std::setw(logger::stringLength) << std::left // Set format << "Remote P2P calls" << " " << remoteNumM2L << std::endl; // Print event and timer listFile << std::setw(logger::stringLength) << std::left // Set format << "Remote M2L calls" << " " << remoteNumP2P << std::endl; // Print event and timer listFile << std::setw(logger::stringLength) << std::left // Set format << "Bodies" << " " << numBodies << std::endl; // Print event and timer #elif EXAFMM_COUNT_KERNEL listFile << std::setw(logger::stringLength) << std::left // Set format << "P2P calls" << " " << numP2P << std::endl; // Print event and timer listFile << std::setw(logger::stringLength) << std::left // Set format << "M2L calls" << " " << numM2L << std::endl; // Print event and timer listFile << std::setw(logger::stringLength) << std::left // Set format << "Bodies" << " " << numBodies << std::endl; // Print event and timer #endif } #if EXAFMM_COUNT_LIST void writeList(Cells cells, int mpirank) { std::stringstream name; // File name name << "list" << std::setfill('0') << 
std::setw(6) // Set format << mpirank << ".dat"; // Create file name for list std::ofstream listFile(name.str().c_str()); // Open list log file for (C_iter C = cells.begin(); C != cells.end(); C++) { // Loop over all lists listFile << std::setw(logger::stringLength) << std::left// Set format << C->ICELL << " " << C->numP2P << " " << C->numM2L << std::endl; // Print list size } // End loop over all lists } #else void writeList(Cells, int) {} #endif }; } #endif
DeclOpenMP.h
//===- DeclOpenMP.h - Classes for representing OpenMP directives -*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// /// \file /// This file defines OpenMP nodes for declarative directives. /// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_DECLOPENMP_H #define LLVM_CLANG_AST_DECLOPENMP_H #include "clang/AST/Decl.h" #include "clang/AST/Expr.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/Type.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/Support/TrailingObjects.h" namespace clang { /// This represents '#pragma omp threadprivate ...' directive. /// For example, in the following, both 'a' and 'A::b' are threadprivate: /// /// \code /// int a; /// #pragma omp threadprivate(a) /// struct A { /// static int b; /// #pragma omp threadprivate(b) /// }; /// \endcode /// class OMPThreadPrivateDecl final : public Decl, private llvm::TrailingObjects<OMPThreadPrivateDecl, Expr *> { friend class ASTDeclReader; friend TrailingObjects; unsigned NumVars; virtual void anchor(); OMPThreadPrivateDecl(Kind DK, DeclContext *DC, SourceLocation L) : Decl(DK, DC, L), NumVars(0) { } ArrayRef<const Expr *> getVars() const { return llvm::makeArrayRef(getTrailingObjects<Expr *>(), NumVars); } MutableArrayRef<Expr *> getVars() { return MutableArrayRef<Expr *>(getTrailingObjects<Expr *>(), NumVars); } void setVars(ArrayRef<Expr *> VL); public: static OMPThreadPrivateDecl *Create(ASTContext &C, DeclContext *DC, SourceLocation L, ArrayRef<Expr *> VL); static OMPThreadPrivateDecl *CreateDeserialized(ASTContext &C, unsigned ID, unsigned N); typedef MutableArrayRef<Expr *>::iterator varlist_iterator; typedef ArrayRef<const Expr 
*>::iterator varlist_const_iterator; typedef llvm::iterator_range<varlist_iterator> varlist_range; typedef llvm::iterator_range<varlist_const_iterator> varlist_const_range; unsigned varlist_size() const { return NumVars; } bool varlist_empty() const { return NumVars == 0; } varlist_range varlists() { return varlist_range(varlist_begin(), varlist_end()); } varlist_const_range varlists() const { return varlist_const_range(varlist_begin(), varlist_end()); } varlist_iterator varlist_begin() { return getVars().begin(); } varlist_iterator varlist_end() { return getVars().end(); } varlist_const_iterator varlist_begin() const { return getVars().begin(); } varlist_const_iterator varlist_end() const { return getVars().end(); } static bool classof(const Decl *D) { return classofKind(D->getKind()); } static bool classofKind(Kind K) { return K == OMPThreadPrivate; } }; /// This represents '#pragma omp declare reduction ...' directive. /// For example, in the following, declared reduction 'foo' for types 'int' and /// 'float': /// /// \code /// #pragma omp declare reduction (foo : int,float : omp_out += omp_in) \ /// initializer (omp_priv = 0) /// \endcode /// /// Here 'omp_out += omp_in' is a combiner and 'omp_priv = 0' is an initializer. class OMPDeclareReductionDecl final : public ValueDecl, public DeclContext { // This class stores some data in DeclContext::OMPDeclareReductionDeclBits // to save some space. Use the provided accessors to access it. public: enum InitKind { CallInit, // Initialized by function call. DirectInit, // omp_priv(<expr>) CopyInit // omp_priv = <expr> }; private: friend class ASTDeclReader; /// Combiner for declare reduction construct. Expr *Combiner = nullptr; /// Initializer for declare reduction construct. Expr *Initializer = nullptr; /// In parameter of the combiner. Expr *In = nullptr; /// Out parameter of the combiner. Expr *Out = nullptr; /// Priv parameter of the initializer. Expr *Priv = nullptr; /// Orig parameter of the initializer. 
Expr *Orig = nullptr; /// Reference to the previous declare reduction construct in the same /// scope with the same name. Required for proper templates instantiation if /// the declare reduction construct is declared inside compound statement. LazyDeclPtr PrevDeclInScope; virtual void anchor(); OMPDeclareReductionDecl(Kind DK, DeclContext *DC, SourceLocation L, DeclarationName Name, QualType Ty, OMPDeclareReductionDecl *PrevDeclInScope); void setPrevDeclInScope(OMPDeclareReductionDecl *Prev) { PrevDeclInScope = Prev; } public: /// Create declare reduction node. static OMPDeclareReductionDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation L, DeclarationName Name, QualType T, OMPDeclareReductionDecl *PrevDeclInScope); /// Create deserialized declare reduction node. static OMPDeclareReductionDecl *CreateDeserialized(ASTContext &C, unsigned ID); /// Get combiner expression of the declare reduction construct. Expr *getCombiner() { return Combiner; } const Expr *getCombiner() const { return Combiner; } /// Get In variable of the combiner. Expr *getCombinerIn() { return In; } const Expr *getCombinerIn() const { return In; } /// Get Out variable of the combiner. Expr *getCombinerOut() { return Out; } const Expr *getCombinerOut() const { return Out; } /// Set combiner expression for the declare reduction construct. void setCombiner(Expr *E) { Combiner = E; } /// Set combiner In and Out vars. void setCombinerData(Expr *InE, Expr *OutE) { In = InE; Out = OutE; } /// Get initializer expression (if specified) of the declare reduction /// construct. Expr *getInitializer() { return Initializer; } const Expr *getInitializer() const { return Initializer; } /// Get initializer kind. InitKind getInitializerKind() const { return static_cast<InitKind>(OMPDeclareReductionDeclBits.InitializerKind); } /// Get Orig variable of the initializer. Expr *getInitOrig() { return Orig; } const Expr *getInitOrig() const { return Orig; } /// Get Priv variable of the initializer. 
Expr *getInitPriv() { return Priv; } const Expr *getInitPriv() const { return Priv; } /// Set initializer expression for the declare reduction construct. void setInitializer(Expr *E, InitKind IK) { Initializer = E; OMPDeclareReductionDeclBits.InitializerKind = IK; } /// Set initializer Orig and Priv vars. void setInitializerData(Expr *OrigE, Expr *PrivE) { Orig = OrigE; Priv = PrivE; } /// Get reference to previous declare reduction construct in the same /// scope with the same name. OMPDeclareReductionDecl *getPrevDeclInScope(); const OMPDeclareReductionDecl *getPrevDeclInScope() const; static bool classof(const Decl *D) { return classofKind(D->getKind()); } static bool classofKind(Kind K) { return K == OMPDeclareReduction; } static DeclContext *castToDeclContext(const OMPDeclareReductionDecl *D) { return static_cast<DeclContext *>(const_cast<OMPDeclareReductionDecl *>(D)); } static OMPDeclareReductionDecl *castFromDeclContext(const DeclContext *DC) { return static_cast<OMPDeclareReductionDecl *>( const_cast<DeclContext *>(DC)); } }; /// This represents '#pragma omp declare mapper ...' directive. Map clauses are /// allowed to use with this directive. The following example declares a user /// defined mapper for the type 'struct vec'. This example instructs the fields /// 'len' and 'data' should be mapped when mapping instances of 'struct vec'. 
/// /// \code /// #pragma omp declare mapper(mid: struct vec v) map(v.len, v.data[0:N]) /// \endcode class OMPDeclareMapperDecl final : public ValueDecl, public DeclContext { friend class ASTDeclReader; /// Clauses associated with this mapper declaration MutableArrayRef<OMPClause *> Clauses; /// Mapper variable, which is 'v' in the example above Expr *MapperVarRef = nullptr; /// Name of the mapper variable DeclarationName VarName; LazyDeclPtr PrevDeclInScope; virtual void anchor(); OMPDeclareMapperDecl(Kind DK, DeclContext *DC, SourceLocation L, DeclarationName Name, QualType Ty, DeclarationName VarName, OMPDeclareMapperDecl *PrevDeclInScope) : ValueDecl(DK, DC, L, Name, Ty), DeclContext(DK), VarName(VarName), PrevDeclInScope(PrevDeclInScope) {} void setPrevDeclInScope(OMPDeclareMapperDecl *Prev) { PrevDeclInScope = Prev; } /// Sets an array of clauses to this mapper declaration void setClauses(ArrayRef<OMPClause *> CL); public: /// Creates declare mapper node. static OMPDeclareMapperDecl *Create(ASTContext &C, DeclContext *DC, SourceLocation L, DeclarationName Name, QualType T, DeclarationName VarName, OMPDeclareMapperDecl *PrevDeclInScope); /// Creates deserialized declare mapper node. static OMPDeclareMapperDecl *CreateDeserialized(ASTContext &C, unsigned ID, unsigned N); /// Creates an array of clauses to this mapper declaration and intializes /// them. 
void CreateClauses(ASTContext &C, ArrayRef<OMPClause *> CL); using clauselist_iterator = MutableArrayRef<OMPClause *>::iterator; using clauselist_const_iterator = ArrayRef<const OMPClause *>::iterator; using clauselist_range = llvm::iterator_range<clauselist_iterator>; using clauselist_const_range = llvm::iterator_range<clauselist_const_iterator>; unsigned clauselist_size() const { return Clauses.size(); } bool clauselist_empty() const { return Clauses.empty(); } clauselist_range clauselists() { return clauselist_range(clauselist_begin(), clauselist_end()); } clauselist_const_range clauselists() const { return clauselist_const_range(clauselist_begin(), clauselist_end()); } clauselist_iterator clauselist_begin() { return Clauses.begin(); } clauselist_iterator clauselist_end() { return Clauses.end(); } clauselist_const_iterator clauselist_begin() const { return Clauses.begin(); } clauselist_const_iterator clauselist_end() const { return Clauses.end(); } /// Get the variable declared in the mapper Expr *getMapperVarRef() { return MapperVarRef; } const Expr *getMapperVarRef() const { return MapperVarRef; } /// Set the variable declared in the mapper void setMapperVarRef(Expr *MapperVarRefE) { MapperVarRef = MapperVarRefE; } /// Get the name of the variable declared in the mapper DeclarationName getVarName() { return VarName; } /// Get reference to previous declare mapper construct in the same /// scope with the same name. 
OMPDeclareMapperDecl *getPrevDeclInScope(); const OMPDeclareMapperDecl *getPrevDeclInScope() const; static bool classof(const Decl *D) { return classofKind(D->getKind()); } static bool classofKind(Kind K) { return K == OMPDeclareMapper; } static DeclContext *castToDeclContext(const OMPDeclareMapperDecl *D) { return static_cast<DeclContext *>(const_cast<OMPDeclareMapperDecl *>(D)); } static OMPDeclareMapperDecl *castFromDeclContext(const DeclContext *DC) { return static_cast<OMPDeclareMapperDecl *>(const_cast<DeclContext *>(DC)); } }; /// Pseudo declaration for capturing expressions. Also is used for capturing of /// non-static data members in non-static member functions. /// /// Clang supports capturing of variables only, but OpenMP 4.5 allows to /// privatize non-static members of current class in non-static member /// functions. This pseudo-declaration allows properly handle this kind of /// capture by wrapping captured expression into a variable-like declaration. class OMPCapturedExprDecl final : public VarDecl { friend class ASTDeclReader; void anchor() override; OMPCapturedExprDecl(ASTContext &C, DeclContext *DC, IdentifierInfo *Id, QualType Type, TypeSourceInfo *TInfo, SourceLocation StartLoc) : VarDecl(OMPCapturedExpr, C, DC, StartLoc, StartLoc, Id, Type, TInfo, SC_None) { setImplicit(); } public: static OMPCapturedExprDecl *Create(ASTContext &C, DeclContext *DC, IdentifierInfo *Id, QualType T, SourceLocation StartLoc); static OMPCapturedExprDecl *CreateDeserialized(ASTContext &C, unsigned ID); SourceRange getSourceRange() const override LLVM_READONLY; // Implement isa/cast/dyncast/etc. static bool classof(const Decl *D) { return classofKind(D->getKind()); } static bool classofKind(Kind K) { return K == OMPCapturedExpr; } }; /// This represents '#pragma omp requires...' directive. 
/// For example
///
/// \code
/// #pragma omp requires unified_address
/// \endcode
///
class OMPRequiresDecl final
    : public Decl,
      private llvm::TrailingObjects<OMPRequiresDecl, OMPClause *> {
  friend class ASTDeclReader;
  friend TrailingObjects;

  // Number of clauses associated with this requires declaration
  unsigned NumClauses = 0;

  // Out-of-line virtual method to serve as the key function (LLVM convention
  // for anchoring the vtable in one translation unit).
  virtual void anchor();

  // Private: instances are created only via Create()/CreateDeserialized(),
  // which allocate the trailing OMPClause* storage.
  OMPRequiresDecl(Kind DK, DeclContext *DC, SourceLocation L)
      : Decl(DK, DC, L), NumClauses(0) {}

  /// Returns an array of immutable clauses associated with this requires
  /// declaration
  ArrayRef<const OMPClause *> getClauses() const {
    return llvm::makeArrayRef(getTrailingObjects<OMPClause *>(), NumClauses);
  }

  /// Returns an array of clauses associated with this requires declaration
  MutableArrayRef<OMPClause *> getClauses() {
    return MutableArrayRef<OMPClause *>(getTrailingObjects<OMPClause *>(),
                                        NumClauses);
  }

  /// Sets an array of clauses to this requires declaration
  void setClauses(ArrayRef<OMPClause *> CL);

public:
  /// Create requires node.
  static OMPRequiresDecl *Create(ASTContext &C, DeclContext *DC,
                                 SourceLocation L, ArrayRef<OMPClause *> CL);
  /// Create deserialized requires node.
  static OMPRequiresDecl *CreateDeserialized(ASTContext &C, unsigned ID,
                                             unsigned N);

  // Iterator/range typedefs over the attached clauses.
  using clauselist_iterator = MutableArrayRef<OMPClause *>::iterator;
  using clauselist_const_iterator = ArrayRef<const OMPClause *>::iterator;
  using clauselist_range = llvm::iterator_range<clauselist_iterator>;
  using clauselist_const_range =
      llvm::iterator_range<clauselist_const_iterator>;

  /// Number of clauses attached to this requires declaration.
  unsigned clauselist_size() const { return NumClauses; }
  /// True when no clauses are attached.
  bool clauselist_empty() const { return NumClauses == 0; }

  clauselist_range clauselists() {
    return clauselist_range(clauselist_begin(), clauselist_end());
  }
  clauselist_const_range clauselists() const {
    return clauselist_const_range(clauselist_begin(), clauselist_end());
  }
  clauselist_iterator clauselist_begin() { return getClauses().begin(); }
  clauselist_iterator clauselist_end() { return getClauses().end(); }
  clauselist_const_iterator clauselist_begin() const {
    return getClauses().begin();
  }
  clauselist_const_iterator clauselist_end() const {
    return getClauses().end();
  }

  static bool classof(const Decl *D) { return classofKind(D->getKind()); }
  static bool classofKind(Kind K) { return K == OMPRequires; }
};

/// This represents '#pragma omp allocate ...' directive.
/// For example, in the following, the default allocator is used for both 'a'
/// and 'A::b':
///
/// \code
/// int a;
/// #pragma omp allocate(a)
/// struct A {
///   static int b;
/// #pragma omp allocate(b)
/// };
/// \endcode
///
class OMPAllocateDecl final
    : public Decl,
      private llvm::TrailingObjects<OMPAllocateDecl, Expr *> {
  friend class ASTDeclReader;
  friend TrailingObjects;

  /// Number of variable within the allocate directive.
  unsigned NumVars = 0;

  // Out-of-line virtual method to serve as the key function (LLVM convention
  // for anchoring the vtable in one translation unit).
  virtual void anchor();

  // Private: instances are created only via Create()/CreateDeserialized(),
  // which allocate the trailing Expr* storage.
  OMPAllocateDecl(Kind DK, DeclContext *DC, SourceLocation L)
      : Decl(DK, DC, L) {}

  /// Returns an array of immutable variable expressions listed in the
  /// directive.
  ArrayRef<const Expr *> getVars() const {
    return llvm::makeArrayRef(getTrailingObjects<Expr *>(), NumVars);
  }

  /// Returns a mutable array of the variable expressions listed in the
  /// directive.
  MutableArrayRef<Expr *> getVars() {
    return MutableArrayRef<Expr *>(getTrailingObjects<Expr *>(), NumVars);
  }

  /// Sets the array of variable expressions for this allocate declaration.
  void setVars(ArrayRef<Expr *> VL);

public:
  /// Create an allocate node with the given variable list.
  static OMPAllocateDecl *Create(ASTContext &C, DeclContext *DC,
                                 SourceLocation L, ArrayRef<Expr *> VL);
  /// Create an empty deserialized allocate node with room for N variables.
  static OMPAllocateDecl *CreateDeserialized(ASTContext &C, unsigned ID,
                                             unsigned N);

  // Iterator/range typedefs over the listed variables.
  typedef MutableArrayRef<Expr *>::iterator varlist_iterator;
  typedef ArrayRef<const Expr *>::iterator varlist_const_iterator;
  typedef llvm::iterator_range<varlist_iterator> varlist_range;
  typedef llvm::iterator_range<varlist_const_iterator> varlist_const_range;

  /// Number of variables listed in the directive.
  unsigned varlist_size() const { return NumVars; }
  /// True when no variables are listed.
  bool varlist_empty() const { return NumVars == 0; }

  varlist_range varlists() {
    return varlist_range(varlist_begin(), varlist_end());
  }
  varlist_const_range varlists() const {
    return varlist_const_range(varlist_begin(), varlist_end());
  }
  varlist_iterator varlist_begin() { return getVars().begin(); }
  varlist_iterator varlist_end() { return getVars().end(); }
  varlist_const_iterator varlist_begin() const { return getVars().begin(); }
  varlist_const_iterator varlist_end() const { return getVars().end(); }

  static bool classof(const Decl *D) { return classofKind(D->getKind()); }
  static bool classofKind(Kind K) { return K == OMPAllocate; }
};

} // end namespace clang

#endif
decrypt_md5_parallel.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <omp.h> #include "md5.h" // ******************************* // Funções de tempo // ******************************* struct timeval start, stop; unsigned diffsec, diffusec; int a[2]; void compute_time_and_flush(); void time_log_start() { gettimeofday(&start, NULL); } void time_log_stop() { gettimeofday(&stop, NULL); compute_time_and_flush(); } void compute_time_and_flush() { diffsec = stop.tv_sec - start.tv_sec; diffusec = (stop.tv_usec - start.tv_usec) >= 0 ? (stop.tv_usec - start.tv_usec) : 1000000 - stop.tv_usec; printf("%d.%d seconds\n", diffsec, diffusec); } // ******************************* // MD5 // ******************************* // MD5 functions #define F(x,y,z) ((x & y) | (~x & z)) #define G(x,y,z) ((x & z) | (y & ~z)) #define H(x,y,z) (x ^ y ^ z) #define I(x,y,z) (y ^ (x | ~z)) // Rotate a 32 bit number left by n bits #define ROTATE(x,n) ((x << n) | (x >> (32-n))) // Swap endianess of 32 bit number #define SWAP(x) ( ((x >> 24) & 0x000000ff) | ((x << 8) & 0x00ff0000) \ | ((x >> 8) & 0x0000ff00) | ((x << 24) & 0xff000000) ); // CONTANTS ------------------------------------------------------------------- // Magic numbers static const uint32_t A = 0x67452301, B = 0xEFCDAB89, C = 0x98BADCFE, D = 0x10325476; static const uint32_t K[64] = { 0xd76aa478, 0xe8c7b756, 0x242070db, 0xc1bdceee, 0xf57c0faf, 0x4787c62a, 0xa8304613, 0xfd469501, 0x698098d8, 0x8b44f7af, 0xffff5bb1, 0x895cd7be, 0x6b901122, 0xfd987193, 0xa679438e, 0x49b40821, 0xf61e2562, 0xc040b340, 0x265e5a51, 0xe9b6c7aa, 0xd62f105d, 0x02441453, 0xd8a1e681, 0xe7d3fbc8, 0x21e1cde6, 0xc33707d6, 0xf4d50d87, 0x455a14ed, 0xa9e3e905, 0xfcefa3f8, 0x676f02d9, 0x8d2a4c8a, 0xfffa3942, 0x8771f681, 0x6d9d6122, 0xfde5380c, 0xa4beea44, 0x4bdecfa9, 0xf6bb4b60, 0xbebfbc70, 0x289b7ec6, 0xeaa127fa, 0xd4ef3085, 0x04881d05, 0xd9d4d039, 0xe6db99e5, 0x1fa27cf8, 0xc4ac5665, 0xf4292244, 0x432aff97, 0xab9423a7, 0xfc93a039, 0x655b59c3, 0x8f0ccc92, 0xffeff47d, 
0x85845dd1, 0x6fa87e4f, 0xfe2ce6e0, 0xa3014314, 0x4e0811a1, 0xf7537e82, 0xbd3af235, 0x2ad7d2bb, 0xeb86d391 }; // Byte selectors static const uint32_t M[64] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 1, 6, 11, 0, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12, 5, 8, 11, 14, 1, 4, 7, 10, 13, 0, 3, 6, 9, 12, 15, 2, 0, 7, 14, 5, 12, 3, 10, 1, 8, 15, 6, 13, 4, 11, 2, 9 }; // Shift values static const uint32_t S[64] = { 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21 }; // FUNCTIONS ------------------------------------------------------------------ void md5_init (md5_t* self) { self->bufsize = 0; self->bitsize = 0; self->rawhash[0] = A; self->rawhash[1] = B; self->rawhash[2] = C; self->rawhash[3] = D; } void md5_digest (md5_t* self) { uint32_t data[16], a, b, c, d; a = self->rawhash[0]; b = self->rawhash[1]; c = self->rawhash[2]; d = self->rawhash[3]; for (int i = 0, j = 0; i < 16; ++i, j+=4) data[i] = (self->buffer[j] ) + (self->buffer[j + 1] << 8) + (self->buffer[j + 2] << 16) + (self->buffer[j + 3] << 24); for (int i = 0; i < 64; ++i) { uint32_t func, temp; if (i < 16) func = F(b,c,d); else if (i < 32) func = G(b,c,d); else if (i < 48) func = H(b,c,d); else if (i < 64) func = I(b,c,d); temp = d; d = c; c = b; b = b + ROTATE(a + func + data[M[i]] + K[i], S[i]); a = temp; } self->rawhash[0] += a; self->rawhash[1] += b; self->rawhash[2] += c; self->rawhash[3] += d; } void md5_update (md5_t* self, byte_t* data, size_t length) { for (size_t i = 0; i < length; ++i) { self->buffer[self->bufsize] = data[i]; self->bufsize++; if (self->bufsize == 64) { md5_digest(self); self->bitsize += 512; self->bufsize = 0; } } } void md5_hash (md5_t* self, uint32_t hash[4]) { size_t i = self->bufsize; self->buffer[i++] = 0x80; // append single bit to message while (i < 64) self->buffer[i++] = 0x00; 
// pad with zeros if (self->bufsize >= 55) { md5_digest(self); for (i = 0; i < 64; ++i) self->buffer[i] = 0x00; } self->bitsize += self->bufsize * 8; self->buffer[56] = self->bitsize; self->buffer[57] = self->bitsize >> 8; self->buffer[58] = self->bitsize >> 16; self->buffer[59] = self->bitsize >> 24; self->buffer[60] = self->bitsize >> 32; self->buffer[61] = self->bitsize >> 40; self->buffer[62] = self->bitsize >> 48; self->buffer[63] = self->bitsize >> 56; md5_digest(self); hash[0] = SWAP(self->rawhash[0]); hash[1] = SWAP(self->rawhash[1]); hash[2] = SWAP(self->rawhash[2]); hash[3] = SWAP(self->rawhash[3]); } // =================== // ******************************* // Métodos Comparativos // ******************************* #define NUMERO_DE_THREADS 8 #define TAMANHO_PALAVRA 5 #define TOTAL_CARACTERES 70 const byte_t ARRAY_CARACTERES[] = { 0x21, 0x23, 0x24, 0x25, 0x2b, 0x3d, 0x3f, 0x40, 0x79, 0x7a, // ! # $ % + = ? @ y z 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, // 0 1 2 3 4 5 6 7 8 9 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0X4a, // A B C D E F G H I J 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0X54, // K L M N O P Q R S T 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x61, 0x62, 0x63, 0X64, // U V W X Y Z a b c d 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0X6e, // e f g h i j k l m n 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0X78 // o p q r s t u v w x }; int comparar_hashes (const uint32_t a[], const uint32_t b[]) { return a[0] == b[0] && a[1] == b[1] && a[2] == b[2] && a[3] == b[3]; } int forcar_quebra_hash_MD5 (const uint32_t hashOriginal[4], uint32_t hashGerada[4], md5_t MD5, byte_t* resultado, byte_t* stringTeste, int lengthPalavraAtual) { if (lengthPalavraAtual < TAMANHO_PALAVRA - 1) { for (int i = 0; i < TOTAL_CARACTERES; ++i) { stringTeste[lengthPalavraAtual] = ARRAY_CARACTERES[i]; if (forcar_quebra_hash_MD5(hashOriginal, hashGerada, MD5, resultado, stringTeste, lengthPalavraAtual + 1)) return 1; } 
} else { for (int i = 0; i < TOTAL_CARACTERES; ++i) { stringTeste[lengthPalavraAtual] = ARRAY_CARACTERES[i]; md5_init(&MD5); md5_update(&MD5, stringTeste, TAMANHO_PALAVRA); md5_hash(&MD5, hashGerada); if (comparar_hashes(hashOriginal, hashGerada)) { strcpy(resultado, stringTeste); return 1; } } } return 0; } // ******************************* // Main // ******************************* int main (){ byte_t str[ TAMANHO_PALAVRA + 1 ]; uint32_t hashOriginal[4]; uint32_t hashGerada[4]; md5_t MD5; char hexstring[33] = {0}; char resultado[TAMANHO_PALAVRA]; int threadId; // Limpar vetor resultado memset(resultado, '\0', TAMANHO_PALAVRA); printf("Informe a hash a ser quebrada: "); fgets(hexstring, 33, stdin); for (int i = 0; i < 4; i++) sscanf(&hexstring[i * 8], "%8x", &hashOriginal[i]); printf("\n===================================\n"); printf("Número de threads: %d\n", NUMERO_DE_THREADS); printf("Número de letras: %d\n", TAMANHO_PALAVRA); printf("===================================\n"); // Iniciar contagem de tempo time_log_start(); static int encontrado = 0; omp_set_num_threads(NUMERO_DE_THREADS); #pragma omp parallel private(threadId, str, hashGerada, MD5) shared(hashOriginal, encontrado) { threadId = omp_get_thread_num() + 1; printf("Thread número %d rodando...\n", threadId); #pragma omp for schedule(dynamic) for (int i = 0; i <= TOTAL_CARACTERES; ++i) { if(encontrado == 0){ str[0] = ARRAY_CARACTERES[i]; encontrado = forcar_quebra_hash_MD5 (hashOriginal, hashGerada, MD5, resultado, str, 1); } } } printf("\n===================================\n"); printf("Texto original: \n%s\n", resultado); printf("===================================\n"); // Mostrar tempo de execução printf("\n===================================\n"); printf("Tempo de Execução:\n"); time_log_stop(); printf("===================================\n"); return 0; }
morphology.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y % % MM MM O O R R P P H H O O L O O G Y Y % % M M M O O RRRR PPPP HHHHH O O L O O G GGG Y % % M M O O R R P H H O O L O O G G Y % % M M OOO R R P H H OOO LLLLL OOO GGG Y % % % % % % MagickCore Morphology Methods % % % % Software Design % % Anthony Thyssen % % January 2010 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Morphology is the application of various kernels, of any size or shape, to an % image in various ways (typically binary, but not always). % % Convolution (weighted sum or average) is just one specific type of % morphology. Just one that is very common for image bluring and sharpening % effects. Not only 2D Gaussian blurring, but also 2-pass 1D Blurring. % % This module provides not only a general morphology function, and the ability % to apply more advanced or iterative morphologies, but also functions for the % generation of many different types of kernel arrays from user supplied % arguments. Prehaps even the generation of a kernel from a small image. */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/cache-view.h" #include "magick/color-private.h" #include "magick/channel.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/hashmap.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/memory-private.h" #include "magick/monitor-private.h" #include "magick/morphology.h" #include "magick/morphology-private.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/prepress.h" #include "magick/quantize.h" #include "magick/registry.h" #include "magick/resource_.h" #include "magick/semaphore.h" #include "magick/splay-tree.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/utility.h" /* Other global definitions used by module. 
*/

#define Minimize(assign,value) assign=MagickMin(assign,value)
#define Maximize(assign,value) assign=MagickMax(assign,value)

/* Integer Factorial Function - for a Binomial kernel.
   NOTE: the iterative product overflows size_t for large n; only suitable
   for the small row sizes a Binomial kernel uses. */
#if 1
static inline size_t fact(size_t n)
{
  size_t l,f;
  for(f=1, l=2; l <= n; f=f*l, l++);
  return(f);
}
#elif 1 /* glibc floating point alternatives (disabled reference versions) */
#define fact(n) ((size_t)tgamma((double)n+1))
#else
#define fact(n) ((size_t)lgamma((double)n+1))
#endif


/* Currently these are only internal to this module */
static void
  CalcKernelMetaData(KernelInfo *),
  ExpandMirrorKernelInfo(KernelInfo *),
  ExpandRotateKernelInfo(KernelInfo *, const double),
  RotateKernelInfo(KernelInfo *, double);

/* Quick function to find last kernel in a kernel list */
static inline KernelInfo *LastKernelInfo(KernelInfo *kernel)
{
  while (kernel->next != (KernelInfo *) NULL)
    kernel=kernel->next;
  return(kernel);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A c q u i r e   K e r n e l   I n f o                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireKernelInfo() takes the given string (generally supplied by the
%  user) and converts it into a Morphology/Convolution Kernel.  This allows
%  users to specify a kernel from a number of pre-defined kernels, or to fully
%  specify their own kernel for a specific Convolution or Morphology
%  Operation.
%
%  The kernel so generated can be any rectangular array of floating point
%  values (doubles) with the 'control point' or 'pixel being affected'
%  anywhere within that array of values.
%
%  Previously IM was restricted to a square of odd size using the exact
%  center as origin, this is no longer the case, and any rectangular kernel
%  with any value being declared the origin. This in turn allows the use of
%  highly asymmetrical kernels.
%
%  The floating point values in the kernel can also include a special value
%  known as 'nan' or 'not a number' to indicate that this value is not part
%  of the kernel array. This allows you to shape the kernel within its
%  rectangular area. That is 'nan' values provide a 'mask' for the kernel
%  shape.  However at least one non-nan value must be provided for correct
%  working of a kernel.
%
%  The returned kernel should be freed using the DestroyKernelInfo method
%  when you are finished with it.  Do not free this memory yourself.
%
%  Input kernel definition strings can consist of any of three types.
%
%    "name:args[[@><]"
%         Select from one of the built in kernels, using the name and
%         geometry arguments supplied.  See AcquireKernelBuiltIn()
%
%    "WxH[+X+Y][@><]:num, num, num ..."
%         a kernel of size W by H, with W*H floating point numbers following.
%         the 'center' can be optionally be defined at +X+Y (such that +0+0
%         is top left corner). If not defined the pixel in the center, for
%         odd sizes, or to the immediate top or left of center for even sizes
%         is automatically selected.
%
%    "num, num, num, num, ..."
%         list of floating point numbers defining an 'old style' odd sized
%         square kernel.  At least 9 values should be provided for a 3x3
%         square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc.
%         Values can be space or comma separated.  This is not recommended.
%
%  You can define a 'list of kernels' which can be used by some morphology
%  operators.  A list is defined as a semi-colon separated list of kernels.
%
%     " kernel ; kernel ; kernel ; "
%
%  Any extra ';' characters, at start, end or between kernel definitions are
%  simply ignored.
%
%  The special flags will expand a single kernel, into a list of rotated
%  kernels. A '@' flag will expand a 3x3 kernel into a list of 45-degree
%  cyclic rotations, while a '>' will generate a list of 90-degree rotations.
% The '<' also exands using 90-degree rotates, but giving a 180-degree % reflected kernel before the +/- 90-degree rotations, which can be important % for Thinning operations. % % Note that 'name' kernels will start with an alphabetic character while the % new kernel specification has a ':' character in its specification string. % If neither is the case, it is assumed an old style of a simple list of % numbers generating a odd-sized square kernel has been given. % % The format of the AcquireKernal method is: % % KernelInfo *AcquireKernelInfo(const char *kernel_string) % % A description of each parameter follows: % % o kernel_string: the Morphology/Convolution kernel wanted. % */ /* This was separated so that it could be used as a separate ** array input handling function, such as for -color-matrix */ static KernelInfo *ParseKernelArray(const char *kernel_string) { KernelInfo *kernel; char token[MaxTextExtent]; const char *p, *end; register ssize_t i; double nan = sqrt((double)-1.0); /* Special Value : Not A Number */ MagickStatusType flags; GeometryInfo args; kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel)); if (kernel == (KernelInfo *) NULL) return(kernel); (void) memset(kernel,0,sizeof(*kernel)); kernel->minimum = kernel->maximum = kernel->angle = 0.0; kernel->negative_range = kernel->positive_range = 0.0; kernel->type = UserDefinedKernel; kernel->next = (KernelInfo *) NULL; kernel->signature = MagickCoreSignature; if (kernel_string == (const char *) NULL) return(kernel); /* find end of this specific kernel definition string */ end = strchr(kernel_string, ';'); if ( end == (char *) NULL ) end = strchr(kernel_string, '\0'); /* clear flags - for Expanding kernel lists thorugh rotations */ flags = NoValue; /* Has a ':' in argument - New user kernel specification FUTURE: this split on ':' could be done by StringToken() */ p = strchr(kernel_string, ':'); if ( p != (char *) NULL && p < end) { /* ParseGeometry() needs the geometry separated! 
-- Arrgghh */ memcpy(token, kernel_string, (size_t) (p-kernel_string)); token[p-kernel_string] = '\0'; SetGeometryInfo(&args); flags = ParseGeometry(token, &args); /* Size handling and checks of geometry settings */ if ( (flags & WidthValue) == 0 ) /* if no width then */ args.rho = args.sigma; /* then width = height */ if ( args.rho < 1.0 ) /* if width too small */ args.rho = 1.0; /* then width = 1 */ if ( args.sigma < 1.0 ) /* if height too small */ args.sigma = args.rho; /* then height = width */ kernel->width = (size_t)args.rho; kernel->height = (size_t)args.sigma; /* Offset Handling and Checks */ if ( args.xi < 0.0 || args.psi < 0.0 ) return(DestroyKernelInfo(kernel)); kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi : (ssize_t) (kernel->width-1)/2; kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi : (ssize_t) (kernel->height-1)/2; if ( kernel->x >= (ssize_t) kernel->width || kernel->y >= (ssize_t) kernel->height ) return(DestroyKernelInfo(kernel)); p++; /* advance beyond the ':' */ } else { /* ELSE - Old old specification, forming odd-square kernel */ /* count up number of values given */ p=(const char *) kernel_string; while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\'')) p++; /* ignore "'" chars for convolve filter usage - Cristy */ for (i=0; p < end; i++) { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); } /* set the size of the kernel - old sized square */ kernel->width = kernel->height= (size_t) sqrt((double) i+1.0); kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; p=(const char *) kernel_string; while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\'')) p++; /* ignore "'" chars for convolve filter usage - Cristy */ } /* Read in the kernel values from rest of input string argument */ kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory( kernel->width,kernel->height*sizeof(*kernel->values))); if (kernel->values == (double *) NULL) 
return(DestroyKernelInfo(kernel)); kernel->minimum=MagickMaximumValue; kernel->maximum=(-MagickMaximumValue); kernel->negative_range = kernel->positive_range = 0.0; for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++) { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); if ( LocaleCompare("nan",token) == 0 || LocaleCompare("-",token) == 0 ) { kernel->values[i] = nan; /* this value is not part of neighbourhood */ } else { kernel->values[i] = StringToDouble(token,(char **) NULL); ( kernel->values[i] < 0) ? ( kernel->negative_range += kernel->values[i] ) : ( kernel->positive_range += kernel->values[i] ); Minimize(kernel->minimum, kernel->values[i]); Maximize(kernel->maximum, kernel->values[i]); } } /* sanity check -- no more values in kernel definition */ GetNextToken(p,&p,MaxTextExtent,token); if ( *token != '\0' && *token != ';' && *token != '\'' ) return(DestroyKernelInfo(kernel)); #if 0 /* this was the old method of handling a incomplete kernel */ if ( i < (ssize_t) (kernel->width*kernel->height) ) { Minimize(kernel->minimum, kernel->values[i]); Maximize(kernel->maximum, kernel->values[i]); for ( ; i < (ssize_t) (kernel->width*kernel->height); i++) kernel->values[i]=0.0; } #else /* Number of values for kernel was not enough - Report Error */ if ( i < (ssize_t) (kernel->width*kernel->height) ) return(DestroyKernelInfo(kernel)); #endif /* check that we recieved at least one real (non-nan) value! 
*/ if (kernel->minimum == MagickMaximumValue) return(DestroyKernelInfo(kernel)); if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel size */ ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */ else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */ ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */ else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */ ExpandMirrorKernelInfo(kernel); /* 90 degree mirror rotate */ return(kernel); } static KernelInfo *ParseKernelName(const char *kernel_string) { char token[MaxTextExtent]; const char *p, *end; GeometryInfo args; KernelInfo *kernel; MagickStatusType flags; ssize_t type; /* Parse special 'named' kernel */ GetNextToken(kernel_string,&p,MaxTextExtent,token); type=ParseCommandOption(MagickKernelOptions,MagickFalse,token); if ( type < 0 || type == UserDefinedKernel ) return((KernelInfo *) NULL); /* not a valid named kernel */ while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';')) p++; end = strchr(p, ';'); /* end of this kernel defintion */ if ( end == (char *) NULL ) end = strchr(p, '\0'); /* ParseGeometry() needs the geometry separated! 
-- Arrgghh */ memcpy(token, p, (size_t) (end-p)); token[end-p] = '\0'; SetGeometryInfo(&args); flags = ParseGeometry(token, &args); #if 0 /* For Debugging Geometry Input */ (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n", flags, args.rho, args.sigma, args.xi, args.psi ); #endif /* special handling of missing values in input string */ switch( type ) { /* Shape Kernel Defaults */ case UnityKernel: if ( (flags & WidthValue) == 0 ) args.rho = 1.0; /* Default scale = 1.0, zero is valid */ break; case SquareKernel: case DiamondKernel: case OctagonKernel: case DiskKernel: case PlusKernel: case CrossKernel: if ( (flags & HeightValue) == 0 ) args.sigma = 1.0; /* Default scale = 1.0, zero is valid */ break; case RingKernel: if ( (flags & XValue) == 0 ) args.xi = 1.0; /* Default scale = 1.0, zero is valid */ break; case RectangleKernel: /* Rectangle - set size defaults */ if ( (flags & WidthValue) == 0 ) /* if no width then */ args.rho = args.sigma; /* then width = height */ if ( args.rho < 1.0 ) /* if width too small */ args.rho = 3; /* then width = 3 */ if ( args.sigma < 1.0 ) /* if height too small */ args.sigma = args.rho; /* then height = width */ if ( (flags & XValue) == 0 ) /* center offset if not defined */ args.xi = (double)(((ssize_t)args.rho-1)/2); if ( (flags & YValue) == 0 ) args.psi = (double)(((ssize_t)args.sigma-1)/2); break; /* Distance Kernel Defaults */ case ChebyshevKernel: case ManhattanKernel: case OctagonalKernel: case EuclideanKernel: if ( (flags & HeightValue) == 0 ) /* no distance scale */ args.sigma = 100.0; /* default distance scaling */ else if ( (flags & AspectValue ) != 0 ) /* '!' 
flag */ args.sigma = QuantumRange/(args.sigma+1); /* maximum pixel distance */ else if ( (flags & PercentValue ) != 0 ) /* '%' flag */ args.sigma *= QuantumRange/100.0; /* percentage of color range */ break; default: break; } kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args); if ( kernel == (KernelInfo *) NULL ) return(kernel); /* global expand to rotated kernel list - only for single kernels */ if ( kernel->next == (KernelInfo *) NULL ) { if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel args */ ExpandRotateKernelInfo(kernel, 45.0); else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */ ExpandRotateKernelInfo(kernel, 90.0); else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */ ExpandMirrorKernelInfo(kernel); } return(kernel); } MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string) { KernelInfo *kernel, *new_kernel; char *kernel_cache, token[MaxTextExtent]; const char *p; if (kernel_string == (const char *) NULL) return(ParseKernelArray(kernel_string)); p=kernel_string; kernel_cache=(char *) NULL; if (*kernel_string == '@') { ExceptionInfo *exception=AcquireExceptionInfo(); kernel_cache=FileToString(kernel_string+1,~0UL,exception); exception=DestroyExceptionInfo(exception); if (kernel_cache == (char *) NULL) return((KernelInfo *) NULL); p=(const char *) kernel_cache; } kernel=NULL; while (GetNextToken(p,(const char **) NULL,MaxTextExtent,token), *token != '\0') { /* ignore extra or multiple ';' kernel separators */ if (*token != ';') { /* tokens starting with alpha is a Named kernel */ if (isalpha((int) ((unsigned char) *token)) != 0) new_kernel=ParseKernelName(p); else /* otherwise a user defined kernel array */ new_kernel=ParseKernelArray(p); /* Error handling -- this is not proper error handling! 
*/ if (new_kernel == (KernelInfo *) NULL) { if (kernel != (KernelInfo *) NULL) kernel=DestroyKernelInfo(kernel); return((KernelInfo *) NULL); } /* initialise or append the kernel list */ if (kernel == (KernelInfo *) NULL) kernel=new_kernel; else LastKernelInfo(kernel)->next=new_kernel; } /* look for the next kernel in list */ p=strchr(p,';'); if (p == (char *) NULL) break; p++; } if (kernel_cache != (char *) NULL) kernel_cache=DestroyString(kernel_cache); return(kernel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A c q u i r e K e r n e l B u i l t I n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireKernelBuiltIn() returned one of the 'named' built-in types of % kernels used for special purposes such as gaussian blurring, skeleton % pruning, and edge distance determination. % % They take a KernelType, and a set of geometry style arguments, which were % typically decoded from a user supplied string, or from a more complex % Morphology Method that was requested. % % The format of the AcquireKernalBuiltIn method is: % % KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type, % const GeometryInfo args) % % A description of each parameter follows: % % o type: the pre-defined type of kernel wanted % % o args: arguments defining or modifying the kernel % % Convolution Kernels % % Unity % The a No-Op or Scaling single element kernel. % % Gaussian:{radius},{sigma} % Generate a two-dimensional gaussian kernel, as used by -gaussian. % The sigma for the curve is required. The resulting kernel is % normalized, % % If 'sigma' is zero, you get a single pixel on a field of zeros. % % NOTE: that the 'radius' is optional, but if provided can limit (clip) % the final size of the resulting kernel to a square 2*radius+1 in size. % The radius should be at least 2 times that of the sigma value, or % sever clipping and aliasing may result. 
If not given or set to 0 the % radius will be determined so as to produce the best minimal error % result, which is usally much larger than is normally needed. % % LoG:{radius},{sigma} % "Laplacian of a Gaussian" or "Mexician Hat" Kernel. % The supposed ideal edge detection, zero-summing kernel. % % An alturnative to this kernel is to use a "DoG" with a sigma ratio of % approx 1.6 (according to wikipedia). % % DoG:{radius},{sigma1},{sigma2} % "Difference of Gaussians" Kernel. % As "Gaussian" but with a gaussian produced by 'sigma2' subtracted % from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1. % The result is a zero-summing kernel. % % Blur:{radius},{sigma}[,{angle}] % Generates a 1 dimensional or linear gaussian blur, at the angle given % (current restricted to orthogonal angles). If a 'radius' is given the % kernel is clipped to a width of 2*radius+1. Kernel can be rotated % by a 90 degree angle. % % If 'sigma' is zero, you get a single pixel on a field of zeros. % % Note that two convolutions with two "Blur" kernels perpendicular to % each other, is equivalent to a far larger "Gaussian" kernel with the % same sigma value, However it is much faster to apply. This is how the % "-blur" operator actually works. % % Comet:{width},{sigma},{angle} % Blur in one direction only, much like how a bright object leaves % a comet like trail. The Kernel is actually half a gaussian curve, % Adding two such blurs in opposite directions produces a Blur Kernel. % Angle can be rotated in multiples of 90 degrees. % % Note that the first argument is the width of the kernel and not the % radius of the kernel. % % Binomial:[{radius}] % Generate a discrete kernel using a 2 dimentional Pascel's Triangle % of values. Used for special forma of image filters % % # Still to be implemented... % # % # Filter2D % # Filter1D % # Set kernel values using a resize filter, and given scale (sigma) % # Cylindrical or Linear. Is this possible with an image? 
% # % % Named Constant Convolution Kernels % % All these are unscaled, zero-summing kernels by default. As such for % non-HDRI version of ImageMagick some form of normalization, user scaling, % and biasing the results is recommended, to prevent the resulting image % being 'clipped'. % % The 3x3 kernels (most of these) can be circularly rotated in multiples of % 45 degrees to generate the 8 angled varients of each of the kernels. % % Laplacian:{type} % Discrete Lapacian Kernels, (without normalization) % Type 0 : 3x3 with center:8 surounded by -1 (8 neighbourhood) % Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood) % Type 2 : 3x3 with center:4 edge:1 corner:-2 % Type 3 : 3x3 with center:4 edge:-2 corner:1 % Type 5 : 5x5 laplacian % Type 7 : 7x7 laplacian % Type 15 : 5x5 LoG (sigma approx 1.4) % Type 19 : 9x9 LoG (sigma approx 1.4) % % Sobel:{angle} % Sobel 'Edge' convolution kernel (3x3) % | -1, 0, 1 | % | -2, 0, 2 | % | -1, 0, 1 | % % Roberts:{angle} % Roberts convolution kernel (3x3) % | 0, 0, 0 | % | -1, 1, 0 | % | 0, 0, 0 | % % Prewitt:{angle} % Prewitt Edge convolution kernel (3x3) % | -1, 0, 1 | % | -1, 0, 1 | % | -1, 0, 1 | % % Compass:{angle} % Prewitt's "Compass" convolution kernel (3x3) % | -1, 1, 1 | % | -1,-2, 1 | % | -1, 1, 1 | % % Kirsch:{angle} % Kirsch's "Compass" convolution kernel (3x3) % | -3,-3, 5 | % | -3, 0, 5 | % | -3,-3, 5 | % % FreiChen:{angle} % Frei-Chen Edge Detector is based on a kernel that is similar to % the Sobel Kernel, but is designed to be isotropic. That is it takes % into account the distance of the diagonal in the kernel. % % | 1, 0, -1 | % | sqrt(2), 0, -sqrt(2) | % | 1, 0, -1 | % % FreiChen:{type},{angle} % % Frei-Chen Pre-weighted kernels... % % Type 0: default un-nomalized version shown above. % % Type 1: Orthogonal Kernel (same as type 11 below) % | 1, 0, -1 | % | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2) % | 1, 0, -1 | % % Type 2: Diagonal form of Kernel... 
% | 1, sqrt(2), 0 | % | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2) % | 0, -sqrt(2) -1 | % % However this kernel is als at the heart of the FreiChen Edge Detection % Process which uses a set of 9 specially weighted kernel. These 9 % kernels not be normalized, but directly applied to the image. The % results is then added together, to produce the intensity of an edge in % a specific direction. The square root of the pixel value can then be % taken as the cosine of the edge, and at least 2 such runs at 90 degrees % from each other, both the direction and the strength of the edge can be % determined. % % Type 10: All 9 of the following pre-weighted kernels... % % Type 11: | 1, 0, -1 | % | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2) % | 1, 0, -1 | % % Type 12: | 1, sqrt(2), 1 | % | 0, 0, 0 | / 2*sqrt(2) % | 1, sqrt(2), 1 | % % Type 13: | sqrt(2), -1, 0 | % | -1, 0, 1 | / 2*sqrt(2) % | 0, 1, -sqrt(2) | % % Type 14: | 0, 1, -sqrt(2) | % | -1, 0, 1 | / 2*sqrt(2) % | sqrt(2), -1, 0 | % % Type 15: | 0, -1, 0 | % | 1, 0, 1 | / 2 % | 0, -1, 0 | % % Type 16: | 1, 0, -1 | % | 0, 0, 0 | / 2 % | -1, 0, 1 | % % Type 17: | 1, -2, 1 | % | -2, 4, -2 | / 6 % | -1, -2, 1 | % % Type 18: | -2, 1, -2 | % | 1, 4, 1 | / 6 % | -2, 1, -2 | % % Type 19: | 1, 1, 1 | % | 1, 1, 1 | / 3 % | 1, 1, 1 | % % The first 4 are for edge detection, the next 4 are for line detection % and the last is to add a average component to the results. % % Using a special type of '-1' will return all 9 pre-weighted kernels % as a multi-kernel list, so that you can use them directly (without % normalization) with the special "-set option:morphology:compose Plus" % setting to apply the full FreiChen Edge Detection Technique. % % If 'type' is large it will be taken to be an actual rotation angle for % the default FreiChen (type 0) kernel. As such FreiChen:45 will look % like a Sobel:45 but with 'sqrt(2)' instead of '2' values. 
% % WARNING: The above was laid out as per % http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf % But rotated 90 degrees so direction is from left rather than the top. % I have yet to find any secondary confirmation of the above. The only % other source found was actual source code at % http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf % Neither paper defines the kernels in a way that looks logical or % correct when taken as a whole. % % Boolean Kernels % % Diamond:[{radius}[,{scale}]] % Generate a diamond shaped kernel with given radius to the points. % Kernel size will again be radius*2+1 square and defaults to radius 1, % generating a 3x3 kernel that is slightly larger than a square. % % Square:[{radius}[,{scale}]] % Generate a square shaped kernel of size radius*2+1, and defaulting % to a 3x3 (radius 1). % % Octagon:[{radius}[,{scale}]] % Generate octagonal shaped kernel of given radius and constant scale. % Default radius is 3 producing a 7x7 kernel. A radius of 1 will result % in "Diamond" kernel. % % Disk:[{radius}[,{scale}]] % Generate a binary disk, thresholded at the radius given, the radius % may be a floating-point value. Final Kernel size is floor(radius)*2+1 % square. A radius of 5.3 is the default. % % NOTE: That low radii Disk kernels produce the same results as % many of the previously defined kernels, but differ greatly at larger % radii. Here is a table of equivalences... % "Disk:1" => "Diamond", "Octagon:1", or "Cross:1" % "Disk:1.5" => "Square" % "Disk:2" => "Diamond:2" % "Disk:2.5" => "Octagon" % "Disk:2.9" => "Square:2" % "Disk:3.5" => "Octagon:3" % "Disk:4.5" => "Octagon:4" % "Disk:5.4" => "Octagon:5" % "Disk:6.4" => "Octagon:6" % All other Disk shapes are unique to this kernel, but because a "Disk" % is more circular when using a larger radius, using a larger radius is % preferred over iterating the morphological operation. % % Rectangle:{geometry} % Simply generate a rectangle of 1's with the size given. 
You can also % specify the location of the 'control point', otherwise the closest % pixel to the center of the rectangle is selected. % % Properly centered and odd sized rectangles work the best. % % Symbol Dilation Kernels % % These kernels are not good general morphological kernels, but are used % more for highlighting and marking any single pixels in an image using % a "Dilate" method as appropriate. % % For the same reasons iterating these kernels does not produce the % same result as using a larger radius for the symbol. % % Plus:[{radius}[,{scale}]] % Cross:[{radius}[,{scale}]] % Generate a kernel in the shape of a 'plus' or a 'cross' with % each arm the length of the given radius (default 2). % % NOTE: "plus:1" is equivalent to a "Diamond" kernel. % % Ring:{radius1},{radius2}[,{scale}] % A ring of the values given that falls between the two radii. % Defaults to a ring of approximately 3 radius in a 7x7 kernel. % This is the 'edge' pixels of the default "Disk" kernel, % More specifically, "Ring" -> "Ring:2.5,3.5,1.0" % % Hit and Miss Kernels % % Peak:radius1,radius2 % Find any peak larger than the pixels that fall between the two radii. % The default ring of pixels is as per "Ring". 
% Edges % Find flat orthogonal edges of a binary shape % Corners % Find 90 degree corners of a binary shape % Diagonals:type % A special kernel to thin the 'outside' of diagonals % LineEnds:type % Find end points of lines (for pruning a skeleton) % Two types of line ends (default to both) can be searched for % Type 0: All line ends % Type 1: single kernel for 4-connected line ends % Type 2: single kernel for simple line ends % LineJunctions % Find three line junctions (within a skeleton) % Type 0: all line junctions % Type 1: Y Junction kernel % Type 2: Diagonal T Junction kernel % Type 3: Orthogonal T Junction kernel % Type 4: Diagonal X Junction kernel % Type 5: Orthogonal + Junction kernel % Ridges:type % Find single pixel ridges or thin lines % Type 1: Find single pixel thick lines and ridges % Type 2: Find two pixel thick lines and ridges % ConvexHull % Octagonal Thickening Kernel, to generate convex hulls of 45 degrees % Skeleton:type % Traditional skeleton generating kernels. % Type 1: Traditional Skeleton kernel (4 connected skeleton) % Type 2: HIPR2 Skeleton kernel (8 connected skeleton) % Type 3: Thinning skeleton based on a research paper by % Dan S. Bloomberg (Default Type) % ThinSE:type % A huge variety of Thinning Kernels designed to preserve connectivity. % Many other kernel sets use these kernels as source definitions. % Type numbers are 41-49, 81-89, 481, and 482 which are based on % the super and sub notations used in the source research paper. % % Distance Measuring Kernels % % Different types of distance measuring methods, which are used with % the 'Distance' morphology method for generating a gradient based on % distance from an edge of a binary shape, though there is a technique % for handling an anti-aliased shape. % % See the 'Distance' Morphological Method, for information on how it is % applied. 
% % Chebyshev:[{radius}][x{scale}[%!]] % Chebyshev Distance (also known as Tchebychev or Chessboard distance) % is a value of one to any neighbour, orthogonal or diagonal. One way % of thinking of it is the number of squares a 'King' or 'Queen' in % chess needs to traverse to reach any other position on a chess board. % It results in a 'square' like distance function, but one where % diagonals are given a value that is closer than expected. % % Manhattan:[{radius}][x{scale}[%!]] % Manhattan Distance (also known as Rectilinear, City Block, or the Taxi % Cab distance metric), it is the distance needed when you can % travel in horizontal or vertical directions only. It is the % distance a 'Rook' in chess would have to travel, and results in a % diamond like distance, where diagonals are further than expected. % % Octagonal:[{radius}][x{scale}[%!]] % An interleaving of Manhattan and Chebyshev metrics producing an % increasing octagonally shaped distance. Distances match those of % the "Octagon" shaped kernel of the same radius. The minimum radius % and default is 2, producing a 5x5 kernel. % % Euclidean:[{radius}][x{scale}[%!]] % Euclidean distance is the 'direct' or 'as the crow flies' distance. % However by default the kernel size only has a radius of 1, which % limits the distance to 'Knight' like moves, with only orthogonal and % diagonal measurements being correct. As such for the default kernel % you will get an octagonal like distance function. % % However using a larger radius such as "Euclidean:4" you will get a % much smoother distance gradient from the edge of the shape. Especially % if the image is pre-processed to include any anti-aliasing pixels. % Of course a larger kernel is slower to use, and not always needed. % % The first three Distance Measuring Kernels will only generate distances % of exact multiples of {scale} in binary images. As such you can use a % scale of 1 without losing any information. 
However you also need some % scaling when handling non-binary anti-aliased shapes. % % The "Euclidean" Distance Kernel however does generate a non-integer % fractional results, and as such scaling is vital even for binary shapes. % */ MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type, const GeometryInfo *args) { KernelInfo *kernel; register ssize_t i; register ssize_t u, v; double nan = sqrt((double)-1.0); /* Special Value : Not A Number */ /* Generate a new empty kernel if needed */ kernel=(KernelInfo *) NULL; switch(type) { case UndefinedKernel: /* These should not call this function */ case UserDefinedKernel: assert("Should not call this function" != (char *) NULL); break; case LaplacianKernel: /* Named Descrete Convolution Kernels */ case SobelKernel: /* these are defined using other kernels */ case RobertsKernel: case PrewittKernel: case CompassKernel: case KirschKernel: case FreiChenKernel: case EdgesKernel: /* Hit and Miss kernels */ case CornersKernel: case DiagonalsKernel: case LineEndsKernel: case LineJunctionsKernel: case RidgesKernel: case ConvexHullKernel: case SkeletonKernel: case ThinSEKernel: break; /* A pre-generated kernel is not needed */ #if 0 /* set to 1 to do a compile-time check that we haven't missed anything */ case UnityKernel: case GaussianKernel: case DoGKernel: case LoGKernel: case BlurKernel: case CometKernel: case BinomialKernel: case DiamondKernel: case SquareKernel: case RectangleKernel: case OctagonKernel: case DiskKernel: case PlusKernel: case CrossKernel: case RingKernel: case PeaksKernel: case ChebyshevKernel: case ManhattanKernel: case OctangonalKernel: case EuclideanKernel: #else default: #endif /* Generate the base Kernel Structure */ kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel)); if (kernel == (KernelInfo *) NULL) return(kernel); (void) memset(kernel,0,sizeof(*kernel)); kernel->minimum = kernel->maximum = kernel->angle = 0.0; kernel->negative_range = kernel->positive_range = 0.0; 
kernel->type = type; kernel->next = (KernelInfo *) NULL; kernel->signature = MagickCoreSignature; break; } switch(type) { /* Convolution Kernels */ case UnityKernel: { kernel->height = kernel->width = (size_t) 1; kernel->x = kernel->y = (ssize_t) 0; kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(1, sizeof(*kernel->values))); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); kernel->maximum = kernel->values[0] = args->rho; break; } break; case GaussianKernel: case DoGKernel: case LoGKernel: { double sigma = fabs(args->sigma), sigma2 = fabs(args->xi), A, B, R; if ( args->rho >= 1.0 ) kernel->width = (size_t)args->rho*2+1; else if ( (type != DoGKernel) || (sigma >= sigma2) ) kernel->width = GetOptimalKernelWidth2D(args->rho,sigma); else kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2); kernel->height = kernel->width; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory( kernel->width,kernel->height*sizeof(*kernel->values))); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* WARNING: The following generates a 'sampled gaussian' kernel. * What we really want is a 'discrete gaussian' kernel. 
* * How to do this is I don't know, but appears to be basied on the * Error Function 'erf()' (intergral of a gaussian) */ if ( type == GaussianKernel || type == DoGKernel ) { /* Calculate a Gaussian, OR positive half of a DoG */ if ( sigma > MagickEpsilon ) { A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ B = (double) (1.0/(Magick2PI*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B; } else /* limiting case - a unity (normalized Dirac) kernel */ { (void) memset(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } } if ( type == DoGKernel ) { /* Subtract a Negative Gaussian for "Difference of Gaussian" */ if ( sigma2 > MagickEpsilon ) { sigma = sigma2; /* simplify loop expressions */ A = 1.0/(2.0*sigma*sigma); B = (double) (1.0/(Magick2PI*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B; } else /* limiting case - a unity (normalized Dirac) kernel */ kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0; } if ( type == LoGKernel ) { /* Calculate a Laplacian of a Gaussian - Or Mexician Hat */ if ( sigma > MagickEpsilon ) { A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { R = ((double)(u*u+v*v))*A; kernel->values[i] = (1-R)*exp(-R)*B; } } else /* special case - generate a unity kernel */ { (void) memset(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } } /* Note the above kernels may have been 'clipped' by a user defined ** radius, producing a smaller 
(darker) kernel. Also for very small ** sigma's (> 0.1) the central value becomes larger than one, and thus ** producing a very bright kernel. ** ** Normalization will still be needed. */ /* Normalize the 2D Gaussian Kernel ** ** NB: a CorrelateNormalize performs a normal Normalize if ** there are no negative values. */ CalcKernelMetaData(kernel); /* the other kernel meta-data */ ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue); break; } case BlurKernel: { double sigma = fabs(args->sigma), alpha, beta; if ( args->rho >= 1.0 ) kernel->width = (size_t)args->rho*2+1; else kernel->width = GetOptimalKernelWidth1D(args->rho,sigma); kernel->height = 1; kernel->x = (ssize_t) (kernel->width-1)/2; kernel->y = 0; kernel->negative_range = kernel->positive_range = 0.0; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); #if 1 #define KernelRank 3 /* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix). ** It generates a gaussian 3 times the width, and compresses it into ** the expected range. This produces a closer normalization of the ** resulting kernel, especially for very low sigma values. ** As such while wierd it is prefered. ** ** I am told this method originally came from Photoshop. ** ** A properly normalized curve is generated (apart from edge clipping) ** even though we later normalize the result (for edge clipping) ** to allow the correct generation of a "Difference of Blurs". 
*/ /* initialize */ v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */ (void) memset(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); /* Calculate a Positive 1D Gaussian */ if ( sigma > MagickEpsilon ) { sigma *= KernelRank; /* simplify loop expressions */ alpha = 1.0/(2.0*sigma*sigma); beta= (double) (1.0/(MagickSQ2PI*sigma )); for ( u=-v; u <= v; u++) { kernel->values[(u+v)/KernelRank] += exp(-((double)(u*u))*alpha)*beta; } } else /* special case - generate a unity kernel */ kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; #else /* Direct calculation without curve averaging This is equivelent to a KernelRank of 1 */ /* Calculate a Positive Gaussian */ if ( sigma > MagickEpsilon ) { alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ beta = 1.0/(MagickSQ2PI*sigma); for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = exp(-((double)(u*u))*alpha)*beta; } else /* special case - generate a unity kernel */ { (void) memset(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } #endif /* Note the above kernel may have been 'clipped' by a user defined ** radius, producing a smaller (darker) kernel. Also for very small ** sigma's (< 0.1) the central value becomes larger than one, as a ** result of not generating a actual 'discrete' kernel, and thus ** producing a very bright 'impulse'. ** ** Becuase of these two factors Normalization is required! */ /* Normalize the 1D Gaussian Kernel ** ** NB: a CorrelateNormalize performs a normal Normalize if ** there are no negative values. 
*/ CalcKernelMetaData(kernel); /* the other kernel meta-data */ ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue); /* rotate the 1D kernel by given angle */ RotateKernelInfo(kernel, args->xi ); break; } case CometKernel: { double sigma = fabs(args->sigma), A; if ( args->rho < 1.0 ) kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1; else kernel->width = (size_t)args->rho; kernel->x = kernel->y = 0; kernel->height = 1; kernel->negative_range = kernel->positive_range = 0.0; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* A comet blur is half a 1D gaussian curve, so that the object is ** blurred in one direction only. This may not be quite the right ** curve to use so may change in the future. The function must be ** normalised after generation, which also resolves any clipping. ** ** As we are normalizing and not subtracting gaussians, ** there is no need for a divisor in the gaussian formula ** ** It is less comples */ if ( sigma > MagickEpsilon ) { #if 1 #define KernelRank 3 v = (ssize_t) kernel->width*KernelRank; /* start/end points */ (void) memset(kernel->values,0, (size_t) kernel->width*sizeof(*kernel->values)); sigma *= KernelRank; /* simplify the loop expression */ A = 1.0/(2.0*sigma*sigma); /* B = 1.0/(MagickSQ2PI*sigma); */ for ( u=0; u < v; u++) { kernel->values[u/KernelRank] += exp(-((double)(u*u))*A); /* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */ } for (i=0; i < (ssize_t) kernel->width; i++) kernel->positive_range += kernel->values[i]; #else A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */ /* B = 1.0/(MagickSQ2PI*sigma); */ for ( i=0; i < (ssize_t) kernel->width; i++) kernel->positive_range += kernel->values[i] = exp(-((double)(i*i))*A); /* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */ #endif } else /* special case - generate a unity kernel */ { (void) 
memset(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; kernel->positive_range = 1.0; } kernel->minimum = 0.0; kernel->maximum = kernel->values[0]; kernel->negative_range = 0.0; ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */ RotateKernelInfo(kernel, args->xi); /* Rotate by angle */ break; } case BinomialKernel: { size_t order_f; if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; order_f = fact(kernel->width-1); kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values within diamond area to scale given */ for ( i=0, v=0; v < (ssize_t)kernel->height; v++) { size_t alpha = order_f / ( fact((size_t) v) * fact(kernel->height-v-1) ); for ( u=0; u < (ssize_t)kernel->width; u++, i++) kernel->positive_range += kernel->values[i] = (double) (alpha * order_f / ( fact((size_t) u) * fact(kernel->height-u-1) )); } kernel->minimum = 1.0; kernel->maximum = kernel->values[kernel->x+kernel->y*kernel->width]; kernel->negative_range = 0.0; break; } /* Convolution Kernels - Well Known Named Constant Kernels */ case LaplacianKernel: { switch ( (int) args->rho ) { case 0: default: /* laplacian square filter -- default */ kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1"); break; case 1: /* laplacian diamond filter */ kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0"); break; case 2: kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2"); break; case 3: kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1"); break; case 5: /* a 5x5 laplacian */ kernel=ParseKernelArray( "5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 -4,-1,0,-1,-4"); break; case 7: /* a 7x7 laplacian */ kernel=ParseKernelArray( 
"7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" ); break; case 15: /* a 5x5 LoG (sigma approx 1.4) */ kernel=ParseKernelArray( "5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0"); break; case 19: /* a 9x9 LoG (sigma approx 1.4) */ /* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */ kernel=ParseKernelArray( "9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; break; } case SobelKernel: { /* Simple Sobel Kernel */ kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case RobertsKernel: { kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case PrewittKernel: { kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case CompassKernel: { kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case KirschKernel: { kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case FreiChenKernel: /* Direction is set to be left to right positive */ /* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? */ /* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? 
*/ { switch ( (int) args->rho ) { default: case 0: kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[3] = +MagickSQ2; kernel->values[5] = -MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ break; case 2: kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[1] = kernel->values[3]= +MagickSQ2; kernel->values[5] = kernel->values[7]= -MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 10: kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19"); if (kernel == (KernelInfo *) NULL) return(kernel); break; case 1: case 11: kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[3] = +MagickSQ2; kernel->values[5] = -MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 12: kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[1] = +MagickSQ2; kernel->values[7] = +MagickSQ2; CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 13: kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[0] = +MagickSQ2; kernel->values[8] = -MagickSQ2; CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 14: kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[2] = -MagickSQ2; kernel->values[6] = +MagickSQ2; 
CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 15: kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/2.0, NoValue); break; case 16: kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/2.0, NoValue); break; case 17: kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/6.0, NoValue); break; case 18: kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/6.0, NoValue); break; case 19: kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/3.0, NoValue); break; } if ( fabs(args->sigma) >= MagickEpsilon ) /* Rotate by correctly supplied 'angle' */ RotateKernelInfo(kernel, args->sigma); else if ( args->rho > 30.0 || args->rho < -30.0 ) /* Rotate by out of bounds 'type' */ RotateKernelInfo(kernel, args->rho); break; } /* Boolean or Shaped Kernels */ case DiamondKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values within diamond area to scale given */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x) kernel->positive_range += kernel->values[i] = args->sigma; else 
kernel->values[i] = nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case SquareKernel: case RectangleKernel: { double scale; if ( type == SquareKernel ) { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = (size_t) (2*args->rho+1); kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; scale = args->sigma; } else { /* NOTE: user defaults set in "AcquireKernelInfo()" */ if ( args->rho < 1.0 || args->sigma < 1.0 ) return(DestroyKernelInfo(kernel)); /* invalid args given */ kernel->width = (size_t)args->rho; kernel->height = (size_t)args->sigma; if ( args->xi < 0.0 || args->xi > (double)kernel->width || args->psi < 0.0 || args->psi > (double)kernel->height ) return(DestroyKernelInfo(kernel)); /* invalid args given */ kernel->x = (ssize_t) args->xi; kernel->y = (ssize_t) args->psi; scale = 1.0; } kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values to scale given */ u=(ssize_t) (kernel->width*kernel->height); for ( i=0; i < u; i++) kernel->values[i] = scale; kernel->minimum = kernel->maximum = scale; /* a flat shape */ kernel->positive_range = scale*u; break; } case OctagonKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius = 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ( (labs((long) u)+labs((long) v)) <= ((long)kernel->x + (long)(kernel->x/2)) ) kernel->positive_range += kernel->values[i] = args->sigma; else kernel->values[i] 
= nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case DiskKernel: { ssize_t limit = (ssize_t)(args->rho*args->rho); if (args->rho < 0.4) /* default radius approx 4.3 */ kernel->width = kernel->height = 9L, limit = 18L; else kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ((u*u+v*v) <= limit) kernel->positive_range += kernel->values[i] = args->sigma; else kernel->values[i] = nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case PlusKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values along axises to given scale */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = (u == 0 || v == 0) ? 
args->sigma : nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0); break; } case CrossKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values along axises to given scale */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = (u == v || u == -v) ? args->sigma : nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0); break; } /* HitAndMiss Kernels */ case RingKernel: case PeaksKernel: { ssize_t limit1, limit2, scale; if (args->rho < args->sigma) { kernel->width = ((size_t)args->sigma)*2+1; limit1 = (ssize_t)(args->rho*args->rho); limit2 = (ssize_t)(args->sigma*args->sigma); } else { kernel->width = ((size_t)args->rho)*2+1; limit1 = (ssize_t)(args->sigma*args->sigma); limit2 = (ssize_t)(args->rho*args->rho); } if ( limit2 <= 0 ) kernel->width = 7L, limit1 = 7L, limit2 = 11L; kernel->height = kernel->width; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */ scale = (ssize_t) (( type == PeaksKernel) ? 
0.0 : args->xi); for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { ssize_t radius=u*u+v*v; if (limit1 < radius && radius <= limit2) kernel->positive_range += kernel->values[i] = (double) scale; else kernel->values[i] = nan; } kernel->minimum = kernel->maximum = (double) scale; if ( type == PeaksKernel ) { /* set the central point in the middle */ kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; kernel->positive_range = 1.0; kernel->maximum = 1.0; } break; } case EdgesKernel: { kernel=AcquireKernelInfo("ThinSE:482"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */ break; } case CornersKernel: { kernel=AcquireKernelInfo("ThinSE:87"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */ break; } case DiagonalsKernel: { switch ( (int) args->rho ) { case 0: default: { KernelInfo *new_kernel; kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; ExpandMirrorKernelInfo(kernel); return(kernel); } case 1: kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-"); break; case 2: kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case LineEndsKernel: { /* Kernels for finding the end of thin lines */ switch ( (int) args->rho ) { case 0: default: /* set of kernels to find all end of lines */ return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>")); case 1: /* kernel for 4-connected line ends - no rotation */ kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-"); break; 
case 2: /* kernel to add for 8-connected lines - no rotation */ kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1"); break; case 3: /* kernel to add for orthogonal line ends - does not find corners */ kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0"); break; case 4: /* traditional line end - fails on last T end */ kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case LineJunctionsKernel: { /* kernels for finding the junctions of multiple lines */ switch ( (int) args->rho ) { case 0: default: /* set of kernels to find all line junctions */ return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>")); case 1: /* Y Junction */ kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-"); break; case 2: /* Diagonal T Junctions */ kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1"); break; case 3: /* Orthogonal T Junctions */ kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-"); break; case 4: /* Diagonal X Junctions */ kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1"); break; case 5: /* Orthogonal X Junctions - minimal diamond kernel */ kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case RidgesKernel: { /* Ridges - Ridge finding kernels */ KernelInfo *new_kernel; switch ( (int) args->rho ) { case 1: default: kernel=ParseKernelArray("3x1:0,1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */ break; case 2: kernel=ParseKernelArray("4x1:0,1,1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */ /* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */ /* Unfortunatally we can not yet rotate a non-square kernel */ /* But then we can't flip a 
non-symetrical kernel either */ new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; break; } break; } case ConvexHullKernel: { KernelInfo *new_kernel; /* first set of 8 kernels */ kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* append the mirror versions too - no flip function yet */ 
new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; ExpandRotateKernelInfo(new_kernel, 90.0); LastKernelInfo(kernel)->next = new_kernel; break; } case SkeletonKernel: { switch ( (int) args->rho ) { case 1: default: /* Traditional Skeleton... ** A cyclically rotated single kernel */ kernel=AcquireKernelInfo("ThinSE:482"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */ break; case 2: /* HIPR Variation of the cyclic skeleton ** Corners of the traditional method made more forgiving, ** but the retain the same cyclic order. */ kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;"); if (kernel == (KernelInfo *) NULL) return(kernel); if (kernel->next == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); kernel->type = type; kernel->next->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */ break; case 3: /* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's ** "Connectivity-Preserving Morphological Image Thransformations" ** by Dan S. Bloomberg, available on Leptonica, Selected Papers, ** http://www.leptonica.com/papers/conn.pdf */ kernel=AcquireKernelInfo( "ThinSE:41; ThinSE:42; ThinSE:43"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->next->type = type; kernel->next->next->type = type; ExpandMirrorKernelInfo(kernel); /* 12 kernels total */ break; } break; } case ThinSEKernel: { /* Special kernels for general thinning, while preserving connections ** "Connectivity-Preserving Morphological Image Thransformations" ** by Dan S. Bloomberg, available on Leptonica, Selected Papers, ** http://www.leptonica.com/papers/conn.pdf ** And ** http://tpgit.github.com/Leptonica/ccthin_8c_source.html ** ** Note kernels do not specify the origin pixel, allowing them ** to be used for both thickening and thinning operations. 
*/ switch ( (int) args->rho ) { /* SE for 4-connected thinning */ case 41: /* SE_4_1 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1"); break; case 42: /* SE_4_2 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-"); break; case 43: /* SE_4_3 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1"); break; case 44: /* SE_4_4 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-"); break; case 45: /* SE_4_5 */ kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-"); break; case 46: /* SE_4_6 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1"); break; case 47: /* SE_4_7 */ kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-"); break; case 48: /* SE_4_8 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1"); break; case 49: /* SE_4_9 */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1"); break; /* SE for 8-connected thinning - negatives of the above */ case 81: /* SE_8_0 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-"); break; case 82: /* SE_8_2 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-"); break; case 83: /* SE_8_3 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-"); break; case 84: /* SE_8_4 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-"); break; case 85: /* SE_8_5 */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-"); break; case 86: /* SE_8_6 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1"); break; case 87: /* SE_8_7 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-"); break; case 88: /* SE_8_8 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-"); break; case 89: /* SE_8_9 */ kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-"); break; /* Special combined SE kernels */ case 423: /* SE_4_2 , SE_4_3 Combined Kernel */ kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-"); break; case 823: /* SE_8_2 , SE_8_3 Combined Kernel */ kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-"); break; case 481: /* SE_48_1 - General Connected Corner Kernel */ kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-"); break; default: case 482: /* SE_48_2 - General Edge Kernel */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1"); break; } 
if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } /* Distance Measuring Kernels */ case ChebyshevKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*MagickMax(fabs((double)u),fabs((double)v)) ); kernel->maximum = kernel->values[0]; break; } case ManhattanKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*(labs((long) u)+labs((long) v)) ); kernel->maximum = kernel->values[0]; break; } case OctagonalKernel: { if (args->rho < 2.0) kernel->width = kernel->height = 5; /* default/minimum radius = 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { double r1 
= MagickMax(fabs((double)u),fabs((double)v)), r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5); kernel->positive_range += kernel->values[i] = args->sigma*MagickMax(r1,r2); } kernel->maximum = kernel->values[0]; break; } case EuclideanKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*sqrt((double)(u*u+v*v)) ); kernel->maximum = kernel->values[0]; break; } default: { /* No-Op Kernel - Basically just a single pixel on its own */ kernel=ParseKernelArray("1:1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = UndefinedKernel; break; } break; } return(kernel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneKernelInfo() creates a new clone of the given Kernel List so that its % can be modified without effecting the original. The cloned kernel should % be destroyed using DestoryKernelInfo() when no longer needed. 
%
%  The format of the CloneKernelInfo method is:
%
%      KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel to be cloned
%
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
  register ssize_t
    i;

  KernelInfo
    *new_kernel;

  assert(kernel != (KernelInfo *) NULL);
  new_kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (new_kernel == (KernelInfo *) NULL)
    return(new_kernel);
  *new_kernel=(*kernel); /* copy values in structure */
  /*
    Detach the pointers copied by the structure assignment: until the deep
    copies below succeed, 'values' and 'next' still alias the source kernel.
    Clearing them first ensures the error-path DestroyKernelInfo() calls
    below can never free memory still owned by the original kernel list.
  */
  new_kernel->values=(double *) NULL;
  new_kernel->next=(KernelInfo *) NULL;
  /* replace the values with a copy of the values */
  new_kernel->values=(double *) AcquireAlignedMemory(kernel->width,
    kernel->height*sizeof(*kernel->values));
  if (new_kernel->values == (double *) NULL)
    return(DestroyKernelInfo(new_kernel));
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    new_kernel->values[i]=kernel->values[i];

  /* Also clone the next kernel in the kernel list */
  if ( kernel->next != (KernelInfo *) NULL ) {
    new_kernel->next = CloneKernelInfo(kernel->next);
    if ( new_kernel->next == (KernelInfo *) NULL )
      return(DestroyKernelInfo(new_kernel));
  }

  return(new_kernel);
}
% % The format of the DestroyKernelInfo method is: % % KernelInfo *DestroyKernelInfo(KernelInfo *kernel) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel to be destroyed % */ MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel) { assert(kernel != (KernelInfo *) NULL); if (kernel->next != (KernelInfo *) NULL) kernel->next=DestroyKernelInfo(kernel->next); kernel->values=(double *) RelinquishAlignedMemory(kernel->values); kernel=(KernelInfo *) RelinquishMagickMemory(kernel); return(kernel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + E x p a n d M i r r o r K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ExpandMirrorKernelInfo() takes a single kernel, and expands it into a % sequence of 90-degree rotated kernels but providing a reflected 180 % rotatation, before the -/+ 90-degree rotations. % % This special rotation order produces a better, more symetrical thinning of % objects. % % The format of the ExpandMirrorKernelInfo method is: % % void ExpandMirrorKernelInfo(KernelInfo *kernel) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % % This function is only internel to this module, as it is not finalized, % especially with regard to non-orthogonal angles, and rotation of larger % 2D kernels. */ #if 0 static void FlopKernelInfo(KernelInfo *kernel) { /* Do a Flop by reversing each row. 
*/ size_t y; register ssize_t x,r; register double *k,t; for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width) for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--) t=k[x], k[x]=k[r], k[r]=t; kernel->x = kernel->width - kernel->x - 1; angle = fmod(angle+180.0, 360.0); } #endif static void ExpandMirrorKernelInfo(KernelInfo *kernel) { KernelInfo *clone, *last; last = kernel; clone = CloneKernelInfo(last); if (clone == (KernelInfo *) NULL) return; RotateKernelInfo(clone, 180); /* flip */ LastKernelInfo(last)->next = clone; last = clone; clone = CloneKernelInfo(last); if (clone == (KernelInfo *) NULL) return; RotateKernelInfo(clone, 90); /* transpose */ LastKernelInfo(last)->next = clone; last = clone; clone = CloneKernelInfo(last); if (clone == (KernelInfo *) NULL) return; RotateKernelInfo(clone, 180); /* flop */ LastKernelInfo(last)->next = clone; return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + E x p a n d R o t a t e K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating % incrementally by the angle given, until the kernel repeats. % % WARNING: 45 degree rotations only works for 3x3 kernels. % While 90 degree roatations only works for linear and square kernels % % The format of the ExpandRotateKernelInfo method is: % % void ExpandRotateKernelInfo(KernelInfo *kernel,double angle) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % % o angle: angle to rotate in degrees % % This function is only internel to this module, as it is not finalized, % especially with regard to non-orthogonal angles, and rotation of larger % 2D kernels. 
*/

/* Internal Routine - Return true if two kernels are the same */
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
     const KernelInfo *kernel2)
{
  register size_t
    i;

  /* check size and origin location */
  if (    kernel1->width != kernel2->width
       || kernel1->height != kernel2->height
       || kernel1->x != kernel2->x
       || kernel1->y != kernel2->y )
    return MagickFalse;

  /* check actual kernel values */
  for (i=0; i < (kernel1->width*kernel1->height); i++) {
    /* Test for Nan equivalence: a NaN in one kernel but not the other
       means the kernels differ (NaN marks a "don't care" element). */
    if ( IsNaN(kernel1->values[i]) && !IsNaN(kernel2->values[i]) )
      return MagickFalse;
    if ( IsNaN(kernel2->values[i]) && !IsNaN(kernel1->values[i]) )
      return MagickFalse;
    /* Test actual values are equivalent (within epsilon, not exact
       float equality) */
    if ( fabs(kernel1->values[i] - kernel2->values[i]) >= MagickEpsilon )
      return MagickFalse;
  }

  return MagickTrue;
}

/* Expand the kernel list by repeatedly cloning the last kernel, rotating
** the clone by 'angle' degrees, and appending it -- stopping as soon as a
** rotation reproduces the first kernel (the cycle is complete) or a clone
** fails to allocate.  The final duplicate clone is discarded.
*/
static void ExpandRotateKernelInfo(KernelInfo *kernel,const double angle)
{
  KernelInfo
    *clone_info,
    *last;

  clone_info=(KernelInfo *) NULL;
  last=kernel;
  DisableMSCWarning(4127)
  while (1) {
  RestoreMSCWarning
    clone_info=CloneKernelInfo(last);
    if (clone_info == (KernelInfo *) NULL)
      break;
    RotateKernelInfo(clone_info,angle);
    /* compare against the FIRST kernel: a match means we have come full
       circle and the expansion is done */
    if (SameKernelInfo(kernel,clone_info) != MagickFalse)
      break;
    LastKernelInfo(last)->next=clone_info;
    last=clone_info;
  }
  if (clone_info != (KernelInfo *) NULL)
    clone_info=DestroyKernelInfo(clone_info); /* kernel repeated - junk */

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     C a l c   K e r n e l   M e t a   D a t a                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CalcKernelMetaData() recalculate the KernelInfo meta-data of this kernel only,
%  using the kernel values.  This should only be used if it is not possible to
%  calculate that meta-data in some easier way.
%
%  It is important that the meta-data is correct before ScaleKernelInfo() is
%  used to perform kernel normalization.
%
%  The format of the CalcKernelMetaData method is:
%
%      void CalcKernelMetaData(KernelInfo *kernel)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel to modify
%
%  WARNING: Minimum and Maximum values are assumed to include zero, even if
%  zero is not part of the kernel (as in Gaussian Derived kernels). This
%  however is not true for flat-shaped morphological kernels.
%
%  WARNING: Only the specific kernel pointed to is modified, not a list of
%  multiple kernels.
%
% This is an internal function and not expected to be useful outside this
% module.  This could change however.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
  register size_t
    n;

  double
    value;

  /* Reset the statistics before accumulating fresh values. */
  kernel->minimum = kernel->maximum = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  for (n=0; n < (kernel->width*kernel->height); n++)
    {
      /* Snap effectively-zero values to an exact zero. */
      if ( fabs(kernel->values[n]) < MagickEpsilon )
        kernel->values[n] = 0.0;
      value=kernel->values[n];
      /* Accumulate negative and positive values in separate ranges. */
      if ( value < 0 )
        kernel->negative_range += value;
      else
        kernel->positive_range += value;
      Minimize(kernel->minimum, value);
      Maximize(kernel->maximum, value);
    }

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o r p h o l o g y   A p p l y                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MorphologyApply() applies a morphological method, multiple times using
%  a list of multiple kernels.  This is the method that should be called by
%  other 'operators' that internally use morphology operations as part of
%  their processing.
%
%  It is basically equivalent to MorphologyImage() (see below) but
%  without any user controls.  This allows internal programs to use this
%  function, to actually perform a specific task without possible interference
%  by any API user supplied settings.
%
%  It is MorphologyImage()'s task to extract any such user controls, and
%  pass them to this function for processing.
%
%  More specifically all given kernels should already be scaled, normalised,
%  and blended appropriately before being passed to this routine.  The
%  appropriate bias, and compose (typically 'UndefinedCompositeOp') given.
%
%  The format of the MorphologyApply method is:
%
%      Image *MorphologyApply(const Image *image,MorphologyMethod method,
%        const ChannelType channel, const ssize_t iterations,
%        const KernelInfo *kernel, const CompositeMethod compose,
%        const double bias, ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the source image
%
%    o method: the morphology method to be applied.
%
%    o channel: the channels to which the operations are applied
%          The channel 'sync' flag determines if 'alpha weighting' is
%          applied for convolution style operations.
%
%    o iterations: apply the operation this many times (or no change).
%          A value of -1 means loop until no change found.
%          How this is applied may depend on the morphology method.
%          Typically this is a value of 1.
%
%    o channel: the channel type.
%
%    o kernel: An array of double representing the morphology kernel.
%
%    o compose: How to handle or merge multi-kernel results.
%          If 'UndefinedCompositeOp' use default for the Morphology method.
%          If 'NoCompositeOp' force image to be re-iterated by each kernel.
%          Otherwise merge the results using the compose method given.
%
%    o bias: Convolution Output Bias.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Apply a Morphology Primitive to an image using the given kernel.
** Two pre-created images must be provided, and no image is created.
** It returns the number of pixels that changed between the images
** for result convergence determination.
*/ static ssize_t MorphologyPrimitive(const Image *image, Image *result_image, const MorphologyMethod method, const ChannelType channel, const KernelInfo *kernel,const double bias,ExceptionInfo *exception) { #define MorphologyTag "Morphology/Image" CacheView *p_view, *q_view; register ssize_t i; size_t *changes, changed, virt_width; ssize_t y, offx, offy; MagickBooleanType status; MagickOffsetType progress; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(result_image != (Image *) NULL); assert(result_image->signature == MagickCoreSignature); assert(kernel != (KernelInfo *) NULL); assert(kernel->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=MagickTrue; progress=0; p_view=AcquireVirtualCacheView(image,exception); q_view=AcquireAuthenticCacheView(result_image,exception); virt_width=image->columns+kernel->width-1; /* Some methods (including convolve) needs use a reflected kernel. * Adjust 'origin' offsets to loop though kernel as a reflection. 
*/ offx = kernel->x; offy = kernel->y; switch(method) { case ConvolveMorphology: case DilateMorphology: case DilateIntensityMorphology: case IterativeDistanceMorphology: /* kernel needs to used with reflection about origin */ offx = (ssize_t) kernel->width-offx-1; offy = (ssize_t) kernel->height-offy-1; break; case ErodeMorphology: case ErodeIntensityMorphology: case HitAndMissMorphology: case ThinningMorphology: case ThickenMorphology: /* kernel is used as is, without reflection */ break; default: assert("Not a Primitive Morphology Method" != (char *) NULL); break; } changed=0; changes=(size_t *) AcquireQuantumMemory(GetOpenMPMaximumThreads(), sizeof(*changes)); if (changes == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++) changes[i]=0; if ( method == ConvolveMorphology && kernel->width == 1 ) { /* Special handling (for speed) of vertical (blur) kernels. ** This performs its handling in columns rather than in rows. 
** This is only done for convolve as it is the only method that ** generates very large 1-D vertical kernels (such as a 'BlurKernel') ** ** Timing tests (on single CPU laptop) ** Using a vertical 1-d Blue with normal row-by-row (below) ** time convert logo: -morphology Convolve Blur:0x10+90 null: ** 0.807u ** Using this column method ** time convert logo: -morphology Convolve Blur:0x10+90 null: ** 0.620u ** ** Anthony Thyssen, 14 June 2010 */ register ssize_t x; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,result_image,image->columns,1) #endif for (x=0; x < (ssize_t) image->columns; x++) { const int id = GetOpenMPThreadId(); register const PixelPacket *magick_restrict p; register const IndexPacket *magick_restrict p_indexes; register PixelPacket *magick_restrict q; register IndexPacket *magick_restrict q_indexes; register ssize_t y; ssize_t r; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(p_view,x,-offy,1,image->rows+kernel->height-1, exception); q=GetCacheViewAuthenticPixels(q_view,x,0,1,result_image->rows,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } p_indexes=GetCacheViewVirtualIndexQueue(p_view); q_indexes=GetCacheViewAuthenticIndexQueue(q_view); /* offset to origin in 'p'. 
while 'q' points to it directly */ r = offy; for (y=0; y < (ssize_t) image->rows; y++) { DoublePixelPacket result; register ssize_t v; register const double *magick_restrict k; register const PixelPacket *magick_restrict k_pixels; register const IndexPacket *magick_restrict k_indexes; /* Copy input image to the output image for unused channels * This removes need for 'cloning' a new image every iteration */ *q = p[r]; if (image->colorspace == CMYKColorspace) SetPixelIndex(q_indexes+y,GetPixelIndex(p_indexes+y+r)); /* Set the bias of the weighted average output */ result.red = result.green = result.blue = result.opacity = result.index = bias; /* Weighted Average of pixels using reflected kernel ** ** NOTE for correct working of this operation for asymetrical ** kernels, the kernel needs to be applied in its reflected form. ** That is its values needs to be reversed. */ k = &kernel->values[ kernel->height-1 ]; k_pixels = p; k_indexes = p_indexes+y; if ( ((channel & SyncChannels) == 0 ) || (image->matte == MagickFalse) ) { /* No 'Sync' involved. 
** Convolution is simple greyscale channel operation */ for (v=0; v < (ssize_t) kernel->height; v++) { if ( IsNaN(*k) ) continue; result.red += (*k)*GetPixelRed(k_pixels); result.green += (*k)*GetPixelGreen(k_pixels); result.blue += (*k)*GetPixelBlue(k_pixels); result.opacity += (*k)*GetPixelOpacity(k_pixels); if ( image->colorspace == CMYKColorspace) result.index += (*k)*(*k_indexes); k--; k_pixels++; k_indexes++; } if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(result.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(result.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(result.blue)); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) SetPixelOpacity(q,ClampToQuantum(result.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(q_indexes+y,ClampToQuantum(result.index)); } else { /* Channel 'Sync' Flag, and Alpha Channel enabled. ** Weight the color channels with Alpha Channel so that ** transparent pixels are not part of the results. 
*/ double gamma; /* divisor, sum of color alpha weighting */ MagickRealType alpha; /* alpha weighting for colors : alpha */ size_t count; /* alpha valus collected, number kernel values */ count=0; gamma=0.0; for (v=0; v < (ssize_t) kernel->height; v++) { if ( IsNaN(*k) ) continue; alpha=QuantumScale*(QuantumRange-GetPixelOpacity(k_pixels)); count++; /* number of alpha values collected */ alpha*=(*k); /* include kernel weighting now */ gamma += alpha; /* normalize alpha weights only */ result.red += alpha*GetPixelRed(k_pixels); result.green += alpha*GetPixelGreen(k_pixels); result.blue += alpha*GetPixelBlue(k_pixels); result.opacity += (*k)*GetPixelOpacity(k_pixels); if ( image->colorspace == CMYKColorspace) result.index += alpha*(*k_indexes); k--; k_pixels++; k_indexes++; } /* Sync'ed channels, all channels are modified */ gamma=PerceptibleReciprocal(gamma); if (count != 0) gamma*=(double) kernel->height/count; SetPixelRed(q,ClampToQuantum(gamma*result.red)); SetPixelGreen(q,ClampToQuantum(gamma*result.green)); SetPixelBlue(q,ClampToQuantum(gamma*result.blue)); SetPixelOpacity(q,ClampToQuantum(result.opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(q_indexes+y,ClampToQuantum(gamma*result.index)); } /* Count up changed pixels */ if ( ( p[r].red != GetPixelRed(q)) || ( p[r].green != GetPixelGreen(q)) || ( p[r].blue != GetPixelBlue(q)) || ( (image->matte != MagickFalse) && (p[r].opacity != GetPixelOpacity(q))) || ( (image->colorspace == CMYKColorspace) && (GetPixelIndex(p_indexes+y+r) != GetPixelIndex(q_indexes+y))) ) changes[id]++; p++; q++; } /* y */ if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,MorphologyTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } /* x */ 
result_image->type=image->type; q_view=DestroyCacheView(q_view); p_view=DestroyCacheView(p_view); for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++) changed+=changes[i]; changes=(size_t *) RelinquishMagickMemory(changes); return(status ? (ssize_t) changed : 0); } /* ** Normal handling of horizontal or rectangular kernels (row by row) */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,result_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register const PixelPacket *magick_restrict p; register const IndexPacket *magick_restrict p_indexes; register PixelPacket *magick_restrict q; register IndexPacket *magick_restrict q_indexes; register ssize_t x; size_t r; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(p_view, -offx, y-offy, virt_width, kernel->height, exception); q=GetCacheViewAuthenticPixels(q_view,0,y,result_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } p_indexes=GetCacheViewVirtualIndexQueue(p_view); q_indexes=GetCacheViewAuthenticIndexQueue(q_view); /* offset to origin in 'p'. 
while 'q' points to it directly */ r = virt_width*offy + offx; for (x=0; x < (ssize_t) image->columns; x++) { ssize_t v; register ssize_t u; register const double *magick_restrict k; register const PixelPacket *magick_restrict k_pixels; register const IndexPacket *magick_restrict k_indexes; DoublePixelPacket result, min, max; /* Copy input image to the output image for unused channels * This removes need for 'cloning' a new image every iteration */ *q = p[r]; if (image->colorspace == CMYKColorspace) SetPixelIndex(q_indexes+x,GetPixelIndex(p_indexes+x+r)); /* Defaults */ min.red = min.green = min.blue = min.opacity = min.index = (double) QuantumRange; max.red = max.green = max.blue = max.opacity = max.index = 0.0; /* default result is the original pixel value */ result.red = (double) p[r].red; result.green = (double) p[r].green; result.blue = (double) p[r].blue; result.opacity = QuantumRange - (double) p[r].opacity; result.index = 0.0; if ( image->colorspace == CMYKColorspace) result.index = (double) GetPixelIndex(p_indexes+x+r); switch (method) { case ConvolveMorphology: /* Set the bias of the weighted average output */ result.red = result.green = result.blue = result.opacity = result.index = bias; break; case DilateIntensityMorphology: case ErodeIntensityMorphology: /* use a boolean flag indicating when first match found */ result.red = 0.0; /* result is not used otherwise */ break; default: break; } switch ( method ) { case ConvolveMorphology: /* Weighted Average of pixels using reflected kernel ** ** NOTE for correct working of this operation for asymetrical ** kernels, the kernel needs to be applied in its reflected form. ** That is its values needs to be reversed. ** ** Correlation is actually the same as this but without reflecting ** the kernel, and thus 'lower-level' that Convolution. 
However ** as Convolution is the more common method used, and it does not ** really cost us much in terms of processing to use a reflected ** kernel, so it is Convolution that is implemented. ** ** Correlation will have its kernel reflected before calling ** this function to do a Convolve. ** ** For more details of Correlation vs Convolution see ** http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf */ k = &kernel->values[ kernel->width*kernel->height-1 ]; k_pixels = p; k_indexes = p_indexes+x; if ( ((channel & SyncChannels) == 0 ) || (image->matte == MagickFalse) ) { /* No 'Sync' involved. ** Convolution is simple greyscale channel operation */ for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++, k--) { if ( IsNaN(*k) ) continue; result.red += (*k)*k_pixels[u].red; result.green += (*k)*k_pixels[u].green; result.blue += (*k)*k_pixels[u].blue; result.opacity += (*k)*k_pixels[u].opacity; if ( image->colorspace == CMYKColorspace) result.index += (*k)*GetPixelIndex(k_indexes+u); } k_pixels += virt_width; k_indexes += virt_width; } if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum((MagickRealType) result.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum((MagickRealType) result.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum((MagickRealType) result.blue)); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) SetPixelOpacity(q,ClampToQuantum((MagickRealType) result.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(q_indexes+x,ClampToQuantum(result.index)); } else { /* Channel 'Sync' Flag, and Alpha Channel enabled. ** Weight the color channels with Alpha Channel so that ** transparent pixels are not part of the results. 
*/ double alpha, /* alpha weighting for colors : alpha */ gamma; /* divisor, sum of color alpha weighting */ size_t count; /* alpha valus collected, number kernel values */ count=0; gamma=0.0; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++, k--) { if ( IsNaN(*k) ) continue; alpha=QuantumScale*(QuantumRange-k_pixels[u].opacity); count++; /* number of alpha values collected */ alpha*=(*k); /* include kernel weighting now */ gamma += alpha; /* normalize alpha weights only */ result.red += alpha*k_pixels[u].red; result.green += alpha*k_pixels[u].green; result.blue += alpha*k_pixels[u].blue; result.opacity += (*k)*k_pixels[u].opacity; if ( image->colorspace == CMYKColorspace) result.index+=alpha*GetPixelIndex(k_indexes+u); } k_pixels += virt_width; k_indexes += virt_width; } /* Sync'ed channels, all channels are modified */ gamma=PerceptibleReciprocal(gamma); if (count != 0) gamma*=(double) kernel->height*kernel->width/count; SetPixelRed(q,ClampToQuantum((MagickRealType) (gamma*result.red))); SetPixelGreen(q,ClampToQuantum((MagickRealType) (gamma*result.green))); SetPixelBlue(q,ClampToQuantum((MagickRealType) (gamma*result.blue))); SetPixelOpacity(q,ClampToQuantum(result.opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(q_indexes+x,ClampToQuantum((MagickRealType) (gamma* result.index))); } break; case ErodeMorphology: /* Minimum Value within kernel neighbourhood ** ** NOTE that the kernel is not reflected for this operation! ** ** NOTE: in normal Greyscale Morphology, the kernel value should ** be added to the real value, this is currently not done, due to ** the nature of the boolean kernels being used. 
*/ k = kernel->values; k_pixels = p; k_indexes = p_indexes+x; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++, k++) { if ( IsNaN(*k) || (*k) < 0.5 ) continue; Minimize(min.red, (double) k_pixels[u].red); Minimize(min.green, (double) k_pixels[u].green); Minimize(min.blue, (double) k_pixels[u].blue); Minimize(min.opacity, QuantumRange-(double) k_pixels[u].opacity); if ( image->colorspace == CMYKColorspace) Minimize(min.index,(double) GetPixelIndex(k_indexes+u)); } k_pixels += virt_width; k_indexes += virt_width; } break; case DilateMorphology: /* Maximum Value within kernel neighbourhood ** ** NOTE for correct working of this operation for asymetrical ** kernels, the kernel needs to be applied in its reflected form. ** That is its values needs to be reversed. ** ** NOTE: in normal Greyscale Morphology, the kernel value should ** be added to the real value, this is currently not done, due to ** the nature of the boolean kernels being used. ** */ k = &kernel->values[ kernel->width*kernel->height-1 ]; k_pixels = p; k_indexes = p_indexes+x; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++, k--) { if ( IsNaN(*k) || (*k) < 0.5 ) continue; Maximize(max.red, (double) k_pixels[u].red); Maximize(max.green, (double) k_pixels[u].green); Maximize(max.blue, (double) k_pixels[u].blue); Maximize(max.opacity, QuantumRange-(double) k_pixels[u].opacity); if ( image->colorspace == CMYKColorspace) Maximize(max.index, (double) GetPixelIndex( k_indexes+u)); } k_pixels += virt_width; k_indexes += virt_width; } break; case HitAndMissMorphology: case ThinningMorphology: case ThickenMorphology: /* Minimum of Foreground Pixel minus Maxumum of Background Pixels ** ** NOTE that the kernel is not reflected for this operation, ** and consists of both foreground and background pixel ** neighbourhoods, 0.0 for background, and 1.0 for foreground ** with either Nan or 0.5 values for don't care. 
** ** Note that this will never produce a meaningless negative ** result. Such results can cause Thinning/Thicken to not work ** correctly when used against a greyscale image. */ k = kernel->values; k_pixels = p; k_indexes = p_indexes+x; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++, k++) { if ( IsNaN(*k) ) continue; if ( (*k) > 0.7 ) { /* minimim of foreground pixels */ Minimize(min.red, (double) k_pixels[u].red); Minimize(min.green, (double) k_pixels[u].green); Minimize(min.blue, (double) k_pixels[u].blue); Minimize(min.opacity, QuantumRange-(double) k_pixels[u].opacity); if ( image->colorspace == CMYKColorspace) Minimize(min.index,(double) GetPixelIndex( k_indexes+u)); } else if ( (*k) < 0.3 ) { /* maximum of background pixels */ Maximize(max.red, (double) k_pixels[u].red); Maximize(max.green, (double) k_pixels[u].green); Maximize(max.blue, (double) k_pixels[u].blue); Maximize(max.opacity, QuantumRange-(double) k_pixels[u].opacity); if ( image->colorspace == CMYKColorspace) Maximize(max.index, (double) GetPixelIndex( k_indexes+u)); } } k_pixels += virt_width; k_indexes += virt_width; } /* Pattern Match if difference is positive */ min.red -= max.red; Maximize( min.red, 0.0 ); min.green -= max.green; Maximize( min.green, 0.0 ); min.blue -= max.blue; Maximize( min.blue, 0.0 ); min.opacity -= max.opacity; Maximize( min.opacity, 0.0 ); min.index -= max.index; Maximize( min.index, 0.0 ); break; case ErodeIntensityMorphology: /* Select Pixel with Minimum Intensity within kernel neighbourhood ** ** WARNING: the intensity test fails for CMYK and does not ** take into account the moderating effect of the alpha channel ** on the intensity. ** ** NOTE that the kernel is not reflected for this operation! 
*/ k = kernel->values; k_pixels = p; k_indexes = p_indexes+x; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++, k++) { if ( IsNaN(*k) || (*k) < 0.5 ) continue; if ( result.red == 0.0 || GetPixelIntensity(image,&(k_pixels[u])) < GetPixelIntensity(result_image,q) ) { /* copy the whole pixel - no channel selection */ *q = k_pixels[u]; if ( result.red > 0.0 ) changes[id]++; result.red = 1.0; } } k_pixels += virt_width; k_indexes += virt_width; } break; case DilateIntensityMorphology: /* Select Pixel with Maximum Intensity within kernel neighbourhood ** ** WARNING: the intensity test fails for CMYK and does not ** take into account the moderating effect of the alpha channel ** on the intensity (yet). ** ** NOTE for correct working of this operation for asymetrical ** kernels, the kernel needs to be applied in its reflected form. ** That is its values needs to be reversed. */ k = &kernel->values[ kernel->width*kernel->height-1 ]; k_pixels = p; k_indexes = p_indexes+x; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++, k--) { if ( IsNaN(*k) || (*k) < 0.5 ) continue; /* boolean kernel */ if ( result.red == 0.0 || GetPixelIntensity(image,&(k_pixels[u])) > GetPixelIntensity(result_image,q) ) { /* copy the whole pixel - no channel selection */ *q = k_pixels[u]; if ( result.red > 0.0 ) changes[id]++; result.red = 1.0; } } k_pixels += virt_width; k_indexes += virt_width; } break; case IterativeDistanceMorphology: /* Work out an iterative distance from black edge of a white image ** shape. Essentually white values are decreased to the smallest ** 'distance from edge' it can find. ** ** It works by adding kernel values to the neighbourhood, and and ** select the minimum value found. The kernel is rotated before ** use, so kernel distances match resulting distances, when a user ** provided asymmetric kernel is applied. 
** ** ** This code is almost identical to True GrayScale Morphology But ** not quite. ** ** GreyDilate Kernel values added, maximum value found Kernel is ** rotated before use. ** ** GrayErode: Kernel values subtracted and minimum value found No ** kernel rotation used. ** ** Note the the Iterative Distance method is essentially a ** GrayErode, but with negative kernel values, and kernel ** rotation applied. */ k = &kernel->values[ kernel->width*kernel->height-1 ]; k_pixels = p; k_indexes = p_indexes+x; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++, k--) { if ( IsNaN(*k) ) continue; Minimize(result.red, (*k)+k_pixels[u].red); Minimize(result.green, (*k)+k_pixels[u].green); Minimize(result.blue, (*k)+k_pixels[u].blue); Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity); if ( image->colorspace == CMYKColorspace) Minimize(result.index,(*k)+GetPixelIndex(k_indexes+u)); } k_pixels += virt_width; k_indexes += virt_width; } break; case UndefinedMorphology: default: break; /* Do nothing */ } /* Final mathematics of results (combine with original image?) ** ** NOTE: Difference Morphology operators Edge* and *Hat could also ** be done here but works better with iteration as a image difference ** in the controlling function (below). Thicken and Thinning however ** should be done here so thay can be iterated correctly. 
*/ switch ( method ) { case HitAndMissMorphology: case ErodeMorphology: result = min; /* minimum of neighbourhood */ break; case DilateMorphology: result = max; /* maximum of neighbourhood */ break; case ThinningMorphology: /* subtract pattern match from original */ result.red -= min.red; result.green -= min.green; result.blue -= min.blue; result.opacity -= min.opacity; result.index -= min.index; break; case ThickenMorphology: /* Add the pattern matchs to the original */ result.red += min.red; result.green += min.green; result.blue += min.blue; result.opacity += min.opacity; result.index += min.index; break; default: /* result directly calculated or assigned */ break; } /* Assign the resulting pixel values - Clamping Result */ switch ( method ) { case UndefinedMorphology: case ConvolveMorphology: case DilateIntensityMorphology: case ErodeIntensityMorphology: break; /* full pixel was directly assigned - not a channel method */ default: if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(result.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(result.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(result.blue)); if ((channel & OpacityChannel) != 0 && image->matte != MagickFalse ) SetPixelAlpha(q,ClampToQuantum(result.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(q_indexes+x,ClampToQuantum(result.index)); break; } /* Count up changed pixels */ if ( ( p[r].red != GetPixelRed(q) ) || ( p[r].green != GetPixelGreen(q) ) || ( p[r].blue != GetPixelBlue(q) ) || ( (image->matte != MagickFalse) && (p[r].opacity != GetPixelOpacity(q))) || ( (image->colorspace == CMYKColorspace) && (GetPixelIndex(p_indexes+x+r) != GetPixelIndex(q_indexes+x))) ) changes[id]++; p++; q++; } /* x */ if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if 
defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,MorphologyTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  } /* y */
  q_view=DestroyCacheView(q_view);
  p_view=DestroyCacheView(p_view);
  /* reduce the per-thread change counters into a single total */
  for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
    changed+=changes[i];
  changes=(size_t *) RelinquishMagickMemory(changes);
  return(status ? (ssize_t)changed : -1);
}

/* This is almost identical to the MorphologyPrimitive() function above,
** but will apply the primitive directly to the actual image using two
** passes, once in each direction, with the results of the previous (and
** current) row being re-used.
**
** That is, after each row is 'Sync'ed' back into the image, the next row
** will make use of those values as part of the calculation of the next
** row.  It then repeats, but going in the opposite (bottom-up) direction.
**
** Because of this 're-use of results' this function can NOT make use of
** multi-threaded, parallel processing.
**
** Returns the number of pixels changed by the two passes, or -1 on error.
** Only DistanceMorphology and VoronoiMorphology are valid methods here.
*/
static ssize_t MorphologyPrimitiveDirect(Image *image,
  const MorphologyMethod method, const ChannelType channel,
  const KernelInfo *kernel,ExceptionInfo *exception)
{
  CacheView
    *auth_view,
    *virt_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y, offx, offy;

  size_t
    changed, virt_width;

  status=MagickTrue;
  changed=0;
  progress=0;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);

  /* Some methods (including convolve) need to use a reflected kernel.
   * Adjust 'origin' offsets to loop though the kernel as a reflection.
   */
  offx = kernel->x;
  offy = kernel->y;
  switch(method) {
    case DistanceMorphology:
    case VoronoiMorphology:
      /* kernel needs to be used with reflection about origin */
      offx = (ssize_t) kernel->width-offx-1;
      offy = (ssize_t) kernel->height-offy-1;
      break;
#if 0
    case ?????Morphology:
      /* kernel is used as is, without reflection */
      break;
#endif
    default:
      /* any other method reaching here is a caller bug */
      assert("Not a PrimativeDirect Morphology Method" != (char *) NULL);
      break;
  }

  /* DO NOT THREAD THIS CODE! */
  /* two views into same image (virtual, and actual/authentic) */
  virt_view=AcquireVirtualCacheView(image,exception);
  auth_view=AcquireAuthenticCacheView(image,exception);
  /* width of the virtual row needed to cover kernel overhang on both edges */
  virt_width=image->columns+kernel->width-1;

  /* Pass 1: top-down iteration of the distance function */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register const IndexPacket
      *magick_restrict p_indexes;

    register PixelPacket
      *magick_restrict q;

    register IndexPacket
      *magick_restrict q_indexes;

    register ssize_t
      x;

    ssize_t
      r;

    /* NOTE we read virtual pixels, and authentic pixels, from the SAME
    ** image!  We read using virtual to get virtual pixel handling, but
    ** write back into the same image.
    **
    ** Only the top half of the kernel is processed as we do a single pass
    ** downward through the image iterating the distance function as we go.
    */
    if (status == MagickFalse)
      break;
    p=GetCacheViewVirtualPixels(virt_view, -offx, y-offy, virt_width,
      (size_t) offy+1, exception);
    q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      status=MagickFalse;
    if (status == MagickFalse)
      break;
    p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
    q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);

    /* offset to the kernel origin pixel in 'p',
       while 'q' points to it directly */
    r = (ssize_t) virt_width*offy + offx;

    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        v;

      register ssize_t
        u;

      register const double
        *magick_restrict k;

      register const PixelPacket
        *magick_restrict k_pixels;

      register const IndexPacket
        *magick_restrict k_indexes;

      MagickPixelPacket
        result;

      /* Starting Defaults: the pixel's current (possibly updated) value */
      GetMagickPixelPacket(image,&result);
      SetMagickPixelPacket(image,q,q_indexes,&result);
      if ( method != VoronoiMorphology )
        result.opacity = QuantumRange - result.opacity;

      switch ( method ) {
        case DistanceMorphology:
          /* Add the kernel Value and select the minimum value found. */
          /* kernel pointer starts at the last value and walks backwards,
             giving the required reflected-kernel traversal */
          k = &kernel->values[ kernel->width*kernel->height-1 ];
          k_pixels = p;
          k_indexes = p_indexes+x;
          for (v=0; v <= (ssize_t) offy; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNaN(*k) ) continue;
              Minimize(result.red,     (*k)+k_pixels[u].red);
              Minimize(result.green,   (*k)+k_pixels[u].green);
              Minimize(result.blue,    (*k)+k_pixels[u].blue);
              Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
              if ( image->colorspace == CMYKColorspace)
                Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          /* repeat with the just-processed pixels of this row (in 'q'),
             covering the kernel values to the left of the origin */
          k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
          k_pixels = q-offx;
          k_indexes = q_indexes-offx;
          for (u=0; u < (ssize_t) offx; u++, k--) {
            if ( x+u-offx < 0 ) continue;  /* off the edge! */
            if ( IsNaN(*k) ) continue;
            Minimize(result.red,     (*k)+k_pixels[u].red);
            Minimize(result.green,   (*k)+k_pixels[u].green);
            Minimize(result.blue,    (*k)+k_pixels[u].blue);
            Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
            if ( image->colorspace == CMYKColorspace)
              Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
          }
          break;
        case VoronoiMorphology:
          /* Apply Distance to the 'Matte' channel, while copying the color
          ** values of the closest pixel.
          **
          ** This is experimental, and really the 'alpha' component should
          ** be a completely separate 'masking' channel so that alpha can
          ** also be used as part of the results.
          */
          k = &kernel->values[ kernel->width*kernel->height-1 ];
          k_pixels = p;
          k_indexes = p_indexes+x;
          for (v=0; v <= (ssize_t) offy; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNaN(*k) ) continue;
              if( result.opacity > (*k)+k_pixels[u].opacity )
                {
                  /* neighbour is closer: take its whole color */
                  SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
                    &result);
                  result.opacity += *k;
                }
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          /* repeat with the just-processed pixels of this row */
          k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
          k_pixels = q-offx;
          k_indexes = q_indexes-offx;
          for (u=0; u < (ssize_t) offx; u++, k--) {
            if ( x+u-offx < 0 ) continue;  /* off the edge! */
            if ( IsNaN(*k) ) continue;
            if( result.opacity > (*k)+k_pixels[u].opacity )
              {
                SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
                  &result);
                result.opacity += *k;
              }
          }
          break;
        default:
          /* result directly calculated or assigned */
          break;
      }
      /* Assign the resulting pixel values - Clamping Result */
      switch ( method ) {
        case VoronoiMorphology:
          SetPixelPacket(image,&result,q,q_indexes);
          break;
        default:
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(result.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(result.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(result.blue));
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            SetPixelAlpha(q,ClampToQuantum(result.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
          break;
      }
      /* Count up changed pixels */
      if (   ( p[r].red != GetPixelRed(q) )
          || ( p[r].green != GetPixelGreen(q) )
          || ( p[r].blue != GetPixelBlue(q) )
          || ( (image->matte != MagickFalse) &&
               (p[r].opacity != GetPixelOpacity(q)))
          || ( (image->colorspace == CMYKColorspace) &&
               (GetPixelIndex(p_indexes+x+r) != GetPixelIndex(q_indexes+x))) )
        changed++;  /* The pixel was changed in some way! */

      p++;  /* increment pixel buffers */
      q++;
    } /* x */
    if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        if (SetImageProgress(image,MorphologyTag,progress,image->rows)
              == MagickFalse )
          status=MagickFalse;
      }
  } /* y */

  /* Pass 2: do the reversed (bottom-up, right-to-left) pass through the
     image, so distances propagate from all directions */
  for (y=(ssize_t)image->rows-1; y >= 0; y--)
  {
    register const PixelPacket
      *magick_restrict p;

    register const IndexPacket
      *magick_restrict p_indexes;

    register PixelPacket
      *magick_restrict q;

    register IndexPacket
      *magick_restrict q_indexes;

    register ssize_t
      x;

    ssize_t
      r;

    if (status == MagickFalse)
      break;
    /* NOTE we read virtual pixels, and authentic pixels, from the SAME
    ** image!  We read using virtual to get virtual pixel handling, but
    ** write back into the same image.
    **
    ** Only the bottom half of the kernel will be processed as we move
    ** up the image.
    */
    p=GetCacheViewVirtualPixels(virt_view, -offx, y, virt_width,
      (size_t) kernel->y+1, exception);
    q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      status=MagickFalse;
    if (status == MagickFalse)
      break;
    p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
    q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);

    /* adjust positions to end of row (we iterate right-to-left) */
    p += image->columns-1;
    q += image->columns-1;

    /* offset to the kernel origin pixel in 'p',
       while 'q' points to it directly */
    r = offx;

    for (x=(ssize_t)image->columns-1; x >= 0; x--)
    {
      ssize_t
        v;

      register ssize_t
        u;

      register const double
        *magick_restrict k;

      register const PixelPacket
        *magick_restrict k_pixels;

      register const IndexPacket
        *magick_restrict k_indexes;

      MagickPixelPacket
        result;

      /* Default - previously modified pixel (result of the first pass) */
      GetMagickPixelPacket(image,&result);
      SetMagickPixelPacket(image,q,q_indexes,&result);
      if ( method != VoronoiMorphology )
        result.opacity = QuantumRange - result.opacity;

      switch ( method ) {
        case DistanceMorphology:
          /* Add the kernel Value and select the minimum value found. */
          k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
          k_pixels = p;
          k_indexes = p_indexes+x;
          for (v=offy; v < (ssize_t) kernel->height; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNaN(*k) ) continue;
              Minimize(result.red,     (*k)+k_pixels[u].red);
              Minimize(result.green,   (*k)+k_pixels[u].green);
              Minimize(result.blue,    (*k)+k_pixels[u].blue);
              Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
              if ( image->colorspace == CMYKColorspace)
                Minimize(result.index,(*k)+GetPixelIndex(k_indexes+u));
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          /* repeat with the just-processed pixels of this row (to the
             right of the origin, since we scan right-to-left) */
          k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
          k_pixels = q-offx;
          k_indexes = q_indexes-offx;
          for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
            if ( (x+u-offx) >= (ssize_t)image->columns ) continue;
            if ( IsNaN(*k) ) continue;
            Minimize(result.red,     (*k)+k_pixels[u].red);
            Minimize(result.green,   (*k)+k_pixels[u].green);
            Minimize(result.blue,    (*k)+k_pixels[u].blue);
            Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
            if ( image->colorspace == CMYKColorspace)
              Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
          }
          break;
        case VoronoiMorphology:
          /* Apply Distance to the 'Matte' channel, copying the closest
          ** color.
          **
          ** This is experimental, and really the 'alpha' component should
          ** be a completely separate 'masking' channel.
          */
          k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
          k_pixels = p;
          k_indexes = p_indexes+x;
          for (v=offy; v < (ssize_t) kernel->height; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNaN(*k) ) continue;
              if( result.opacity > (*k)+k_pixels[u].opacity )
                {
                  SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
                    &result);
                  result.opacity += *k;
                }
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          /* repeat with the just-processed pixels of this row */
          k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
          k_pixels = q-offx;
          k_indexes = q_indexes-offx;
          for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
            if ( (x+u-offx) >= (ssize_t)image->columns ) continue;
            if ( IsNaN(*k) ) continue;
            if( result.opacity > (*k)+k_pixels[u].opacity )
              {
                SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
                  &result);
                result.opacity += *k;
              }
          }
          break;
        default:
          /* result directly calculated or assigned */
          break;
      }
      /* Assign the resulting pixel values - Clamping Result */
      switch ( method ) {
        case VoronoiMorphology:
          SetPixelPacket(image,&result,q,q_indexes);
          break;
        default:
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(result.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(result.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(result.blue));
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            SetPixelAlpha(q,ClampToQuantum(result.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
          break;
      }
      /* Count up changed pixels */
      if (   ( p[r].red != GetPixelRed(q) )
          || ( p[r].green != GetPixelGreen(q) )
          || ( p[r].blue != GetPixelBlue(q) )
          || ( (image->matte != MagickFalse) &&
               (p[r].opacity != GetPixelOpacity(q)))
          || ( (image->colorspace == CMYKColorspace) &&
               (GetPixelIndex(p_indexes+x+r) != GetPixelIndex(q_indexes+x))) )
        changed++;  /* The pixel was changed in some way! */

      p--;  /* go backward through pixel buffers */
      q--;
    } /* x */
    if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        if ( SetImageProgress(image,MorphologyTag,progress,image->rows)
              == MagickFalse )
          status=MagickFalse;
      }
  } /* y */

  auth_view=DestroyCacheView(auth_view);
  virt_view=DestroyCacheView(virt_view);

  return(status ? (ssize_t) changed : -1);
}

/* Apply a Morphology by calling one of the above low-level primitive
** application functions.  This function handles any iteration loops,
** composition or re-iteration of results, and compound morphology methods
** that are based on multiple low-level (staged) morphology methods.
**
** Basically this provides the complex glue between the requested morphology
** method and the raw low-level implementation (above).
*/ MagickExport Image *MorphologyApply(const Image *image, const ChannelType channel,const MorphologyMethod method, const ssize_t iterations, const KernelInfo *kernel, const CompositeOperator compose, const double bias, ExceptionInfo *exception) { CompositeOperator curr_compose; Image *curr_image, /* Image we are working with or iterating */ *work_image, /* secondary image for primitive iteration */ *save_image, /* saved image - for 'edge' method only */ *rslt_image; /* resultant image - after multi-kernel handling */ KernelInfo *reflected_kernel, /* A reflected copy of the kernel (if needed) */ *norm_kernel, /* the current normal un-reflected kernel */ *rflt_kernel, /* the current reflected kernel (if needed) */ *this_kernel; /* the kernel being applied */ MorphologyMethod primitive; /* the current morphology primitive being applied */ CompositeOperator rslt_compose; /* multi-kernel compose method for results to use */ MagickBooleanType special, /* do we use a direct modify function? */ verbose; /* verbose output of results */ size_t method_loop, /* Loop 1: number of compound method iterations (norm 1) */ method_limit, /* maximum number of compound method iterations */ kernel_number, /* Loop 2: the kernel number being applied */ stage_loop, /* Loop 3: primitive loop for compound morphology */ stage_limit, /* how many primitives are in this compound */ kernel_loop, /* Loop 4: iterate the kernel over image */ kernel_limit, /* number of times to iterate kernel */ count, /* total count of primitive steps applied */ kernel_changed, /* total count of changed using iterated kernel */ method_changed; /* total count of changed over method iteration */ ssize_t changed; /* number pixels changed by last primitive operation */ char v_info[MaxTextExtent]; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(kernel != (KernelInfo *) NULL); assert(kernel->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); 
assert(exception->signature == MagickCoreSignature); count = 0; /* number of low-level morphology primitives performed */ if ( iterations == 0 ) return((Image *) NULL); /* null operation - nothing to do! */ kernel_limit = (size_t) iterations; if ( iterations < 0 ) /* negative interations = infinite (well alomst) */ kernel_limit = image->columns>image->rows ? image->columns : image->rows; verbose = IsMagickTrue(GetImageArtifact(image,"debug")); /* initialise for cleanup */ curr_image = (Image *) image; curr_compose = image->compose; (void) curr_compose; work_image = save_image = rslt_image = (Image *) NULL; reflected_kernel = (KernelInfo *) NULL; /* Initialize specific methods * + which loop should use the given iteratations * + how many primitives make up the compound morphology * + multi-kernel compose method to use (by default) */ method_limit = 1; /* just do method once, unless otherwise set */ stage_limit = 1; /* assume method is not a compound */ special = MagickFalse; /* assume it is NOT a direct modify primitive */ rslt_compose = compose; /* and we are composing multi-kernels as given */ switch( method ) { case SmoothMorphology: /* 4 primitive compound morphology */ stage_limit = 4; break; case OpenMorphology: /* 2 primitive compound morphology */ case OpenIntensityMorphology: case TopHatMorphology: case CloseMorphology: case CloseIntensityMorphology: case BottomHatMorphology: case EdgeMorphology: stage_limit = 2; break; case HitAndMissMorphology: rslt_compose = LightenCompositeOp; /* Union of multi-kernel results */ /* FALL THUR */ case ThinningMorphology: case ThickenMorphology: method_limit = kernel_limit; /* iterate the whole method */ kernel_limit = 1; /* do not do kernel iteration */ break; case DistanceMorphology: case VoronoiMorphology: special = MagickTrue; /* use special direct primative */ break; default: break; } /* Apply special methods with special requirments ** For example, single run only, or post-processing requirements */ if ( special != 
MagickFalse ) { rslt_image=CloneImage(image,0,0,MagickTrue,exception); if (rslt_image == (Image *) NULL) goto error_cleanup; if (SetImageStorageClass(rslt_image,DirectClass) == MagickFalse) { InheritException(exception,&rslt_image->exception); goto error_cleanup; } changed = MorphologyPrimitiveDirect(rslt_image, method, channel, kernel, exception); if ( verbose != MagickFalse ) (void) (void) FormatLocaleFile(stderr, "%s:%.20g.%.20g #%.20g => Changed %.20g\n", CommandOptionToMnemonic(MagickMorphologyOptions, method), 1.0,0.0,1.0, (double) changed); if ( changed < 0 ) goto error_cleanup; if ( method == VoronoiMorphology ) { /* Preserve the alpha channel of input image - but turned off */ (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel); (void) CompositeImageChannel(rslt_image, DefaultChannels, CopyOpacityCompositeOp, image, 0, 0); (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel); } goto exit_cleanup; } /* Handle user (caller) specified multi-kernel composition method */ if ( compose != UndefinedCompositeOp ) rslt_compose = compose; /* override default composition for method */ if ( rslt_compose == UndefinedCompositeOp ) rslt_compose = NoCompositeOp; /* still not defined! Then re-iterate */ /* Some methods require a reflected kernel to use with primitives. * Create the reflected kernel for those methods. */ switch ( method ) { case CorrelateMorphology: case CloseMorphology: case CloseIntensityMorphology: case BottomHatMorphology: case SmoothMorphology: reflected_kernel = CloneKernelInfo(kernel); if (reflected_kernel == (KernelInfo *) NULL) goto error_cleanup; RotateKernelInfo(reflected_kernel,180); break; default: break; } /* Loops around more primitive morpholgy methods ** erose, dilate, open, close, smooth, edge, etc... 
*/ /* Loop 1: iterate the compound method */ method_loop = 0; method_changed = 1; while ( method_loop < method_limit && method_changed > 0 ) { method_loop++; method_changed = 0; /* Loop 2: iterate over each kernel in a multi-kernel list */ norm_kernel = (KernelInfo *) kernel; this_kernel = (KernelInfo *) kernel; rflt_kernel = reflected_kernel; kernel_number = 0; while ( norm_kernel != NULL ) { /* Loop 3: Compound Morphology Staging - Select Primative to apply */ stage_loop = 0; /* the compound morphology stage number */ while ( stage_loop < stage_limit ) { stage_loop++; /* The stage of the compound morphology */ /* Select primitive morphology for this stage of compound method */ this_kernel = norm_kernel; /* default use unreflected kernel */ primitive = method; /* Assume method is a primitive */ switch( method ) { case ErodeMorphology: /* just erode */ case EdgeInMorphology: /* erode and image difference */ primitive = ErodeMorphology; break; case DilateMorphology: /* just dilate */ case EdgeOutMorphology: /* dilate and image difference */ primitive = DilateMorphology; break; case OpenMorphology: /* erode then dialate */ case TopHatMorphology: /* open and image difference */ primitive = ErodeMorphology; if ( stage_loop == 2 ) primitive = DilateMorphology; break; case OpenIntensityMorphology: primitive = ErodeIntensityMorphology; if ( stage_loop == 2 ) primitive = DilateIntensityMorphology; break; case CloseMorphology: /* dilate, then erode */ case BottomHatMorphology: /* close and image difference */ this_kernel = rflt_kernel; /* use the reflected kernel */ primitive = DilateMorphology; if ( stage_loop == 2 ) primitive = ErodeMorphology; break; case CloseIntensityMorphology: this_kernel = rflt_kernel; /* use the reflected kernel */ primitive = DilateIntensityMorphology; if ( stage_loop == 2 ) primitive = ErodeIntensityMorphology; break; case SmoothMorphology: /* open, close */ switch ( stage_loop ) { case 1: /* start an open method, which starts with Erode */ 
primitive = ErodeMorphology; break; case 2: /* now Dilate the Erode */ primitive = DilateMorphology; break; case 3: /* Reflect kernel a close */ this_kernel = rflt_kernel; /* use the reflected kernel */ primitive = DilateMorphology; break; case 4: /* Finish the Close */ this_kernel = rflt_kernel; /* use the reflected kernel */ primitive = ErodeMorphology; break; } break; case EdgeMorphology: /* dilate and erode difference */ primitive = DilateMorphology; if ( stage_loop == 2 ) { save_image = curr_image; /* save the image difference */ curr_image = (Image *) image; primitive = ErodeMorphology; } break; case CorrelateMorphology: /* A Correlation is a Convolution with a reflected kernel. ** However a Convolution is a weighted sum using a reflected ** kernel. It may seem stange to convert a Correlation into a ** Convolution as the Correlation is the simplier method, but ** Convolution is much more commonly used, and it makes sense to ** implement it directly so as to avoid the need to duplicate the ** kernel when it is not required (which is typically the ** default). 
*/ this_kernel = rflt_kernel; /* use the reflected kernel */ primitive = ConvolveMorphology; break; default: break; } assert( this_kernel != (KernelInfo *) NULL ); /* Extra information for debugging compound operations */ if ( verbose != MagickFalse ) { if ( stage_limit > 1 ) (void) FormatLocaleString(v_info,MaxTextExtent,"%s:%.20g.%.20g -> ", CommandOptionToMnemonic(MagickMorphologyOptions,method),(double) method_loop,(double) stage_loop); else if ( primitive != method ) (void) FormatLocaleString(v_info, MaxTextExtent, "%s:%.20g -> ", CommandOptionToMnemonic(MagickMorphologyOptions, method),(double) method_loop); else v_info[0] = '\0'; } /* Loop 4: Iterate the kernel with primitive */ kernel_loop = 0; kernel_changed = 0; changed = 1; while ( kernel_loop < kernel_limit && changed > 0 ) { kernel_loop++; /* the iteration of this kernel */ /* Create a clone as the destination image, if not yet defined */ if ( work_image == (Image *) NULL ) { work_image=CloneImage(image,0,0,MagickTrue,exception); if (work_image == (Image *) NULL) goto error_cleanup; if (SetImageStorageClass(work_image,DirectClass) == MagickFalse) { InheritException(exception,&work_image->exception); goto error_cleanup; } /* work_image->type=image->type; ??? */ } /* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */ count++; changed = MorphologyPrimitive(curr_image, work_image, primitive, channel, this_kernel, bias, exception); if ( verbose != MagickFalse ) { if ( kernel_loop > 1 ) (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line from previous */ (void) (void) FormatLocaleFile(stderr, "%s%s%s:%.20g.%.20g #%.20g => Changed %.20g", v_info,CommandOptionToMnemonic(MagickMorphologyOptions, primitive),(this_kernel == rflt_kernel ) ? 
"*" : "", (double) (method_loop+kernel_loop-1),(double) kernel_number, (double) count,(double) changed); } if ( changed < 0 ) goto error_cleanup; kernel_changed += changed; method_changed += changed; /* prepare next loop */ { Image *tmp = work_image; /* swap images for iteration */ work_image = curr_image; curr_image = tmp; } if ( work_image == image ) work_image = (Image *) NULL; /* replace input 'image' */ } /* End Loop 4: Iterate the kernel with primitive */ if ( verbose != MagickFalse && kernel_changed != (size_t)changed ) (void) FormatLocaleFile(stderr, " Total %.20g",(double) kernel_changed); if ( verbose != MagickFalse && stage_loop < stage_limit ) (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line before looping */ #if 0 (void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image); (void) FormatLocaleFile(stderr, " curr =0x%lx\n", (unsigned long)curr_image); (void) FormatLocaleFile(stderr, " work =0x%lx\n", (unsigned long)work_image); (void) FormatLocaleFile(stderr, " save =0x%lx\n", (unsigned long)save_image); (void) FormatLocaleFile(stderr, " union=0x%lx\n", (unsigned long)rslt_image); #endif } /* End Loop 3: Primative (staging) Loop for Coumpound Methods */ /* Final Post-processing for some Compound Methods ** ** The removal of any 'Sync' channel flag in the Image Compositon ** below ensures the methematical compose method is applied in a ** purely mathematical way, and only to the selected channels. ** Turn off SVG composition 'alpha blending'. 
*/ switch( method ) { case EdgeOutMorphology: case EdgeInMorphology: case TopHatMorphology: case BottomHatMorphology: if ( verbose != MagickFalse ) (void) FormatLocaleFile(stderr, "\n%s: Difference with original image", CommandOptionToMnemonic(MagickMorphologyOptions,method)); (void) CompositeImageChannel(curr_image,(ChannelType) (channel & ~SyncChannels),DifferenceCompositeOp,image,0,0); break; case EdgeMorphology: if ( verbose != MagickFalse ) (void) FormatLocaleFile(stderr, "\n%s: Difference of Dilate and Erode", CommandOptionToMnemonic(MagickMorphologyOptions,method)); (void) CompositeImageChannel(curr_image,(ChannelType) (channel & ~SyncChannels),DifferenceCompositeOp,save_image,0,0); save_image = DestroyImage(save_image); /* finished with save image */ break; default: break; } /* multi-kernel handling: re-iterate, or compose results */ if ( kernel->next == (KernelInfo *) NULL ) rslt_image = curr_image; /* just return the resulting image */ else if ( rslt_compose == NoCompositeOp ) { if ( verbose != MagickFalse ) { if ( this_kernel->next != (KernelInfo *) NULL ) (void) FormatLocaleFile(stderr, " (re-iterate)"); else (void) FormatLocaleFile(stderr, " (done)"); } rslt_image = curr_image; /* return result, and re-iterate */ } else if ( rslt_image == (Image *) NULL) { if ( verbose != MagickFalse ) (void) FormatLocaleFile(stderr, " (save for compose)"); rslt_image = curr_image; curr_image = (Image *) image; /* continue with original image */ } else { /* Add the new 'current' result to the composition ** ** The removal of any 'Sync' channel flag in the Image Compositon ** below ensures the methematical compose method is applied in a ** purely mathematical way, and only to the selected channels. ** IE: Turn off SVG composition 'alpha blending'. 
*/ if ( verbose != MagickFalse ) (void) FormatLocaleFile(stderr, " (compose \"%s\")", CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) ); (void) CompositeImageChannel(rslt_image, (ChannelType) (channel & ~SyncChannels), rslt_compose, curr_image, 0, 0); curr_image = DestroyImage(curr_image); curr_image = (Image *) image; /* continue with original image */ } if ( verbose != MagickFalse ) (void) FormatLocaleFile(stderr, "\n"); /* loop to the next kernel in a multi-kernel list */ norm_kernel = norm_kernel->next; if ( rflt_kernel != (KernelInfo *) NULL ) rflt_kernel = rflt_kernel->next; kernel_number++; } /* End Loop 2: Loop over each kernel */ } /* End Loop 1: compound method interation */ goto exit_cleanup; /* Yes goto's are bad, but it makes cleanup lot more efficient */ error_cleanup: if ( curr_image == rslt_image ) curr_image = (Image *) NULL; if ( rslt_image != (Image *) NULL ) rslt_image = DestroyImage(rslt_image); exit_cleanup: if ( curr_image == rslt_image || curr_image == image ) curr_image = (Image *) NULL; if ( curr_image != (Image *) NULL ) curr_image = DestroyImage(curr_image); if ( work_image != (Image *) NULL ) work_image = DestroyImage(work_image); if ( save_image != (Image *) NULL ) save_image = DestroyImage(save_image); if ( reflected_kernel != (KernelInfo *) NULL ) reflected_kernel = DestroyKernelInfo(reflected_kernel); return(rslt_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o r p h o l o g y I m a g e C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MorphologyImageChannel() applies a user supplied kernel to the image % according to the given mophology method. % % This function applies any and all user defined settings before calling % the above internal function MorphologyApply(). % % User defined settings include... 
% * Output Bias for Convolution and correlation ("-bias" or "-define convolve:bias=??") % * Kernel Scale/normalize settings ("-set 'option:convolve:scale'") % This can also includes the addition of a scaled unity kernel. % * Show Kernel being applied ("-set option:showKernel 1") % % The format of the MorphologyImage method is: % % Image *MorphologyImage(const Image *image,MorphologyMethod method, % const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception) % % Image *MorphologyImageChannel(const Image *image, const ChannelType % channel,MorphologyMethod method,const ssize_t iterations, % KernelInfo *kernel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o method: the morphology method to be applied. % % o iterations: apply the operation this many times (or no change). % A value of -1 means loop until no change found. % How this is applied may depend on the morphology method. % Typically this is a value of 1. % % o channel: the channel type. % % o kernel: An array of double representing the morphology kernel. % Warning: kernel may be normalized for the Convolve method. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *MorphologyImage(const Image *image, const MorphologyMethod method,const ssize_t iterations, const KernelInfo *kernel,ExceptionInfo *exception) { Image *morphology_image; morphology_image=MorphologyImageChannel(image,DefaultChannels,method, iterations,kernel,exception); return(morphology_image); } MagickExport Image *MorphologyImageChannel(const Image *image, const ChannelType channel,const MorphologyMethod method, const ssize_t iterations,const KernelInfo *kernel,ExceptionInfo *exception) { KernelInfo *curr_kernel; CompositeOperator compose; double bias; Image *morphology_image; /* Apply Convolve/Correlate Normalization and Scaling Factors. 
* This is done BEFORE the ShowKernelInfo() function is called so that * users can see the results of the 'option:convolve:scale' option. */ curr_kernel = (KernelInfo *) kernel; bias=image->bias; if ((method == ConvolveMorphology) || (method == CorrelateMorphology)) { const char *artifact; artifact = GetImageArtifact(image,"convolve:bias"); if (artifact != (const char *) NULL) bias=StringToDoubleInterval(artifact,(double) QuantumRange+1.0); artifact = GetImageArtifact(image,"convolve:scale"); if ( artifact != (const char *) NULL ) { if ( curr_kernel == kernel ) curr_kernel = CloneKernelInfo(kernel); if (curr_kernel == (KernelInfo *) NULL) { curr_kernel=DestroyKernelInfo(curr_kernel); return((Image *) NULL); } ScaleGeometryKernelInfo(curr_kernel, artifact); } } /* display the (normalized) kernel via stderr */ if ( IsMagickTrue(GetImageArtifact(image,"showKernel")) || IsMagickTrue(GetImageArtifact(image,"convolve:showKernel")) || IsMagickTrue(GetImageArtifact(image,"morphology:showKernel")) ) ShowKernelInfo(curr_kernel); /* Override the default handling of multi-kernel morphology results * If 'Undefined' use the default method * If 'None' (default for 'Convolve') re-iterate previous result * Otherwise merge resulting images using compose method given. * Default for 'HitAndMiss' is 'Lighten'. 
*/ { const char *artifact; compose = UndefinedCompositeOp; /* use default for method */ artifact = GetImageArtifact(image,"morphology:compose"); if ( artifact != (const char *) NULL) compose = (CompositeOperator) ParseCommandOption( MagickComposeOptions,MagickFalse,artifact); } /* Apply the Morphology */ morphology_image = MorphologyApply(image, channel, method, iterations, curr_kernel, compose, bias, exception); /* Cleanup and Exit */ if ( curr_kernel != kernel ) curr_kernel=DestroyKernelInfo(curr_kernel); return(morphology_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R o t a t e K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RotateKernelInfo() rotates the kernel by the angle given. % % Currently it is restricted to 90 degree angles, of either 1D kernels % or square kernels. And 'circular' rotations of 45 degrees for 3x3 kernels. % It will ignore usless rotations for specific 'named' built-in kernels. % % The format of the RotateKernelInfo method is: % % void RotateKernelInfo(KernelInfo *kernel, double angle) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % % o angle: angle to rotate in degrees % % This function is currently internal to this module only, but can be exported % to other modules if needed. */ static void RotateKernelInfo(KernelInfo *kernel, double angle) { /* angle the lower kernels first */ if ( kernel->next != (KernelInfo *) NULL) RotateKernelInfo(kernel->next, angle); /* WARNING: Currently assumes the kernel (rightly) is horizontally symetrical ** ** TODO: expand beyond simple 90 degree rotates, flips and flops */ /* Modulus the angle */ angle = fmod(angle, 360.0); if ( angle < 0 ) angle += 360.0; if ( 337.5 < angle || angle <= 22.5 ) return; /* Near zero angle - no change! 
- At least not at this time */ /* Handle special cases */ switch (kernel->type) { /* These built-in kernels are cylindrical kernels, rotating is useless */ case GaussianKernel: case DoGKernel: case LoGKernel: case DiskKernel: case PeaksKernel: case LaplacianKernel: case ChebyshevKernel: case ManhattanKernel: case EuclideanKernel: return; /* These may be rotatable at non-90 angles in the future */ /* but simply rotating them in multiples of 90 degrees is useless */ case SquareKernel: case DiamondKernel: case PlusKernel: case CrossKernel: return; /* These only allows a +/-90 degree rotation (by transpose) */ /* A 180 degree rotation is useless */ case BlurKernel: if ( 135.0 < angle && angle <= 225.0 ) return; if ( 225.0 < angle && angle <= 315.0 ) angle -= 180; break; default: break; } /* Attempt rotations by 45 degrees -- 3x3 kernels only */ if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 ) { if ( kernel->width == 3 && kernel->height == 3 ) { /* Rotate a 3x3 square by 45 degree angle */ double t = kernel->values[0]; kernel->values[0] = kernel->values[3]; kernel->values[3] = kernel->values[6]; kernel->values[6] = kernel->values[7]; kernel->values[7] = kernel->values[8]; kernel->values[8] = kernel->values[5]; kernel->values[5] = kernel->values[2]; kernel->values[2] = kernel->values[1]; kernel->values[1] = t; /* rotate non-centered origin */ if ( kernel->x != 1 || kernel->y != 1 ) { ssize_t x,y; x = (ssize_t) kernel->x-1; y = (ssize_t) kernel->y-1; if ( x == y ) x = 0; else if ( x == 0 ) x = -y; else if ( x == -y ) y = 0; else if ( y == 0 ) y = x; kernel->x = (ssize_t) x+1; kernel->y = (ssize_t) y+1; } angle = fmod(angle+315.0, 360.0); /* angle reduced 45 degrees */ kernel->angle = fmod(kernel->angle+45.0, 360.0); } else perror("Unable to rotate non-3x3 kernel by 45 degrees"); } if ( 45.0 < fmod(angle, 180.0) && fmod(angle,180.0) <= 135.0 ) { if ( kernel->width == 1 || kernel->height == 1 ) { /* Do a transpose of a 1 dimensional kernel, ** which results in a 
fast 90 degree rotation of some type. */ ssize_t t; t = (ssize_t) kernel->width; kernel->width = kernel->height; kernel->height = (size_t) t; t = kernel->x; kernel->x = kernel->y; kernel->y = t; if ( kernel->width == 1 ) { angle = fmod(angle+270.0, 360.0); /* angle reduced 90 degrees */ kernel->angle = fmod(kernel->angle+90.0, 360.0); } else { angle = fmod(angle+90.0, 360.0); /* angle increased 90 degrees */ kernel->angle = fmod(kernel->angle+270.0, 360.0); } } else if ( kernel->width == kernel->height ) { /* Rotate a square array of values by 90 degrees */ { register size_t i,j,x,y; register double *k,t; k=kernel->values; for( i=0, x=kernel->width-1; i<=x; i++, x--) for( j=0, y=kernel->height-1; j<y; j++, y--) { t = k[i+j*kernel->width]; k[i+j*kernel->width] = k[j+x*kernel->width]; k[j+x*kernel->width] = k[x+y*kernel->width]; k[x+y*kernel->width] = k[y+i*kernel->width]; k[y+i*kernel->width] = t; } } /* rotate the origin - relative to center of array */ { register ssize_t x,y; x = (ssize_t) (kernel->x*2-kernel->width+1); y = (ssize_t) (kernel->y*2-kernel->height+1); kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2; kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2; } angle = fmod(angle+270.0, 360.0); /* angle reduced 90 degrees */ kernel->angle = fmod(kernel->angle+90.0, 360.0); } else perror("Unable to rotate a non-square, non-linear kernel 90 degrees"); } if ( 135.0 < angle && angle <= 225.0 ) { /* For a 180 degree rotation - also know as a reflection * This is actually a very very common operation! * Basically all that is needed is a reversal of the kernel data! 
* And a reflection of the origon */ double t; register double *k; size_t i, j; k=kernel->values; for ( i=0, j=kernel->width*kernel->height-1; i<j; i++, j--) t=k[i], k[i]=k[j], k[j]=t; kernel->x = (ssize_t) kernel->width - kernel->x - 1; kernel->y = (ssize_t) kernel->height - kernel->y - 1; angle = fmod(angle-180.0, 360.0); /* angle+180 degrees */ kernel->angle = fmod(kernel->angle+180.0, 360.0); } /* At this point angle should at least between -45 (315) and +45 degrees * In the future some form of non-orthogonal angled rotates could be * performed here, posibily with a linear kernel restriction. */ return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S c a l e G e o m e t r y K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ScaleGeometryKernelInfo() takes a geometry argument string, typically % provided as a "-set option:convolve:scale {geometry}" user setting, % and modifies the kernel according to the parsed arguments of that setting. % % The first argument (and any normalization flags) are passed to % ScaleKernelInfo() to scale/normalize the kernel. The second argument % is then passed to UnityAddKernelInfo() to add a scled unity kernel % into the scaled/normalized kernel. % % The format of the ScaleGeometryKernelInfo method is: % % void ScaleGeometryKernelInfo(KernelInfo *kernel, % const double scaling_factor,const MagickStatusType normalize_flags) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel to modify % % o geometry: % The geometry string to parse, typically from the user provided % "-set option:convolve:scale {geometry}" setting. 
% */
MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel,
  const char *geometry)
{
  GeometryInfo
    args;

  GeometryFlags
    flags;

  /* Parse the user supplied geometry string into scaling arguments. */
  SetGeometryInfo(&args);
  flags = (GeometryFlags) ParseGeometry(geometry, &args);

#if 0
  /* For Debugging Geometry Input */
  (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
    flags, args.rho, args.sigma, args.xi, args.psi );
#endif

  /* A percentage flag scales both arguments down by 100. */
  if ( (flags & PercentValue) != 0 ) {
    args.rho *= 0.01;
    args.sigma *= 0.01;
  }

  /* Provide defaults for any argument the user did not give. */
  if ( (flags & RhoValue) == 0 )
    args.rho = 1.0;
  if ( (flags & SigmaValue) == 0 )
    args.sigma = 0.0;

  /* First scale/normalize the kernel as requested... */
  ScaleKernelInfo(kernel, args.rho, flags);

  /* ...then blend in a scaled unity kernel, if a second value was given. */
  if ( (flags & SigmaValue) != 0 )
    UnityAddKernelInfo(kernel, args.sigma);

  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S c a l e K e r n e l I n f o                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ScaleKernelInfo() scales the given kernel list by the given amount, with or
%  without normalization of the sum of the kernel values (as per given flags).
%
%  By default (no flags given) the values within the kernel is scaled
%  directly using given scaling factor without change.
%
%  If either of the two 'normalize_flags' are given the kernel will first be
%  normalized and then further scaled by the scaling factor value given.
%
%  Kernel normalization ('normalize_flags' given) is designed to ensure that
%  any use of the kernel scaling factor with 'Convolve' or 'Correlate'
%  morphology methods will fall into -1.0 to +1.0 range.  Note that for
%  non-HDRI versions of IM this may cause images to have any negative results
%  clipped, unless some 'bias' is used.
%
%  More specifically.  Kernels which only contain positive values (such as a
%  'Gaussian' kernel) will be scaled so that those values sum to +1.0,
%  ensuring a 0.0 to +1.0 output range for non-HDRI images.
%
%  For Kernels that contain some negative values, (such as 'Sharpen' kernels)
%  the kernel will be scaled by the absolute of the sum of kernel values, so
%  that it will generally fall within the +/- 1.0 range.
%
%  For kernels whose values sum to zero, (such as 'Laplician' kernels) kernel
%  will be scaled by just the sum of the postive values, so that its output
%  range will again fall into the +/- 1.0 range.
%
%  For special kernels designed for locating shapes using 'Correlate', (often
%  only containing +1 and -1 values, representing foreground/brackground
%  matching) a special normalization method is provided to scale the positive
%  values separately to those of the negative values, so the kernel will be
%  forced to become a zero-sum kernel better suited to such searches.
%
%  WARNING: Correct normalization of the kernel assumes that the '*_range'
%  attributes within the kernel structure have been correctly set during the
%  kernels creation.
%
%  NOTE: The values used for 'normalize_flags' have been selected specifically
%  to match the use of geometry options, so that '!' means NormalizeValue, '^'
%  means CorrelateNormalizeValue.  All other GeometryFlags values are ignored.
%
%  The format of the ScaleKernelInfo method is:
%
%      void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
%               const MagickStatusType normalize_flags )
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
%    o scaling_factor:
%             multiply all values (after normalization) by this factor if not
%             zero.  If the kernel is normalized regardless of any flags.
%
%    o normalize_flags:
%             GeometryFlags defining normalization method to use.
% specifically: NormalizeValue, CorrelateNormalizeValue, % and/or PercentValue % */ MagickExport void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,const GeometryFlags normalize_flags) { register ssize_t i; register double pos_scale, neg_scale; /* do the other kernels in a multi-kernel list first */ if ( kernel->next != (KernelInfo *) NULL) ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags); /* Normalization of Kernel */ pos_scale = 1.0; if ( (normalize_flags&NormalizeValue) != 0 ) { if ( fabs(kernel->positive_range + kernel->negative_range) >= MagickEpsilon ) /* non-zero-summing kernel (generally positive) */ pos_scale = fabs(kernel->positive_range + kernel->negative_range); else /* zero-summing kernel */ pos_scale = kernel->positive_range; } /* Force kernel into a normalized zero-summing kernel */ if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) { pos_scale = ( fabs(kernel->positive_range) >= MagickEpsilon ) ? kernel->positive_range : 1.0; neg_scale = ( fabs(kernel->negative_range) >= MagickEpsilon ) ? -kernel->negative_range : 1.0; } else neg_scale = pos_scale; /* finialize scaling_factor for positive and negative components */ pos_scale = scaling_factor/pos_scale; neg_scale = scaling_factor/neg_scale; for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++) if ( ! IsNaN(kernel->values[i]) ) kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale; /* convolution output range */ kernel->positive_range *= pos_scale; kernel->negative_range *= neg_scale; /* maximum and minimum values in kernel */ kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale; kernel->minimum *= (kernel->minimum >= 0.0) ? 
pos_scale : neg_scale; /* swap kernel settings if user's scaling factor is negative */ if ( scaling_factor < MagickEpsilon ) { double t; t = kernel->positive_range; kernel->positive_range = kernel->negative_range; kernel->negative_range = t; t = kernel->maximum; kernel->maximum = kernel->minimum; kernel->minimum = 1; } return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h o w K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShowKernelInfo() outputs the details of the given kernel defination to % standard error, generally due to a users 'showKernel' option request. % % The format of the ShowKernelInfo method is: % % void ShowKernelInfo(const KernelInfo *kernel) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % */ MagickExport void ShowKernelInfo(const KernelInfo *kernel) { const KernelInfo *k; size_t c, i, u, v; for (c=0, k=kernel; k != (KernelInfo *) NULL; c++, k=k->next ) { (void) FormatLocaleFile(stderr, "Kernel"); if ( kernel->next != (KernelInfo *) NULL ) (void) FormatLocaleFile(stderr, " #%lu", (unsigned long) c ); (void) FormatLocaleFile(stderr, " \"%s", CommandOptionToMnemonic(MagickKernelOptions, k->type) ); if ( fabs(k->angle) >= MagickEpsilon ) (void) FormatLocaleFile(stderr, "@%lg", k->angle); (void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long) k->width,(unsigned long) k->height,(long) k->x,(long) k->y); (void) FormatLocaleFile(stderr, " with values from %.*lg to %.*lg\n", GetMagickPrecision(), k->minimum, GetMagickPrecision(), k->maximum); (void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg", GetMagickPrecision(), k->negative_range, GetMagickPrecision(), k->positive_range); if ( fabs(k->positive_range+k->negative_range) < MagickEpsilon ) (void) FormatLocaleFile(stderr, " (Zero-Summing)\n"); else if ( 
fabs(k->positive_range+k->negative_range-1.0) < MagickEpsilon ) (void) FormatLocaleFile(stderr, " (Normalized)\n"); else (void) FormatLocaleFile(stderr, " (Sum %.*lg)\n", GetMagickPrecision(), k->positive_range+k->negative_range); for (i=v=0; v < k->height; v++) { (void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) v ); for (u=0; u < k->width; u++, i++) if ( IsNaN(k->values[i]) ) (void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan"); else (void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3, GetMagickPrecision(), k->values[i]); (void) FormatLocaleFile(stderr,"\n"); } } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n i t y A d d K e r n a l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnityAddKernelInfo() Adds a given amount of the 'Unity' Convolution Kernel % to the given pre-scaled and normalized Kernel. This in effect adds that % amount of the original image into the resulting convolution kernel. This % value is usually provided by the user as a percentage value in the % 'convolve:scale' setting. % % The resulting effect is to convert the defined kernels into blended % soft-blurs, unsharp kernels or into sharpening kernels. % % The format of the UnityAdditionKernelInfo method is: % % void UnityAdditionKernelInfo(KernelInfo *kernel, const double scale ) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % % o scale: % scaling factor for the unity kernel to be added to % the given kernel. 
% */
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
  const double scale)
{
  KernelInfo
    *k;

  /* Walk the whole multi-kernel list, blending a scaled unity (identity)
  ** kernel into each member and refreshing its derived meta-data.
  */
  for (k = kernel; k != (KernelInfo *) NULL; k = k->next) {
    k->values[k->x+k->y*k->width] += scale;
    CalcKernelMetaData(k);  /* recalculate the meta-data */
  }

  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     Z e r o K e r n e l N a n s                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZeroKernelNans() replaces any special 'nan' value that may be present in
%  the kernel with a zero value.  This is typically done when the kernel will
%  be used in special hardware (GPU) convolution processors, to simply
%  matters.
%
%  The format of the ZeroKernelNans method is:
%
%      void ZeroKernelNans (KernelInfo *kernel)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
*/
MagickExport void ZeroKernelNans(KernelInfo *kernel)
{
  KernelInfo
    *k;

  size_t
    i,
    n;

  /* Replace every NaN 'control' value with zero, in each kernel of the
  ** multi-kernel list.
  */
  for (k = kernel; k != (KernelInfo *) NULL; k = k->next) {
    n = k->width*k->height;
    for (i = 0; i < n; i++)
      if ( IsNaN(k->values[i]) )
        k->values[i] = 0.0;
  }

  return;
}
keystore_fmt_plug.c
/* Java KeyStore cracker. Written by Dhiru Kholia <dhiru at openwall.com> and
 * Narendra Kangralkar <narendrakangralkar at gmail.com>.
 *
 * Input Format: $keystore$target$data_length$data$hash$nkeys$keylength$keydata$keylength$keydata...
 *
 * This software is Copyright (c) 2013, Dhiru Kholia <dhiru.kholia at gmail.com>
 * and Narendra Kangralkar <narendrakangralkar at gmail.com> and it is hereby
 * released to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted.
 *
 * major re-write - JimF, Feb, 2016.
 *  Added SIMD and prebuild all salt data for SIMD.
 *  made a common code module (for sharing code with GPU)
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_keystore;
#elif FMT_REGISTERS_H
john_register_one(&fmt_keystore);
#else

#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "simd-intrinsics.h"
//#undef SIMD_COEF_32
#include "sha.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "dyna_salt.h"
#include "johnswap.h"
#include "keystore_common.h"

#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#if SIMD_COEF_32
#define OMP_SCALE 1024
#else
#define OMP_SCALE 64
#endif
#endif
#elif SIMD_COEF_32
#define OMP_SCALE 128
#endif

#include "memdbg.h"

#ifdef SIMD_COEF_32
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_SHA1)
#endif

#define FORMAT_LABEL            "keystore"
#define FORMAT_NAME             "Java KeyStore"
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME          "SHA1 " SHA1_ALGORITHM_NAME
#else
#define ALGORITHM_NAME          "SHA1 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        0
#define PLAINTEXT_LENGTH        125
#define SALT_SIZE               sizeof(struct keystore_salt *)
#define SALT_ALIGN              sizeof(struct keystore_salt *)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT      NBKEYS
#define MAX_KEYS_PER_CRYPT      NBKEYS
#else
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1
#endif

/* candidate passwords, lengths, and per-candidate pre-keyed SHA1 contexts */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int (*saved_len);
static SHA_CTX (*saved_ctx);
static int dirty;                       /* set_key happened since last crypt_all */
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static int *MixOrder, MixOrderLen;      /* indices sorted by password length (SIMD) */

#ifdef SIMD_COEF_32
/* byte position of logical byte i of lane `index` inside the interleaved
   big-endian SIMD SHA1 input buffer */
#define GETPOS(i, index) ((index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*4*SIMD_COEF_32)
static unsigned salt_mem_total;         /* running total of prebuilt-salt memory */

typedef struct preload_t {
	// Only handle password lengths of 4 to 24 (21 elements) in this code.
	// passwords of other lengths are handled by oSSL CTX method.
	ARCH_WORD_32 (*first_blk)[21][SHA_BUF_SIZ*NBKEYS];
	ARCH_WORD_32 *ex_data[21];
	int n_ex[21]; // number of sha blocks in ex_data.
	unsigned char data_hash[20]; // to find if this one loaded before.
	struct preload_t *next;
} preload;
static preload *salt_preload; // this is our linked list.
static preload *cursimd; // set_salt points this to the current salt.
#endif

typedef struct keystore_salt_t {
	dyna_salt dsalt;
	int target;
	int data_length;
	int count;
	int keysize;
	unsigned char data_hash[20]; // this is the SHA of the data block.
	unsigned char *data;
	unsigned char *keydata;
	void *ptr; // points to a pre-built salt record (only SIMD)
} keystore_salt;

static keystore_salt *keystore_cur_salt;

/* To guard against tampering with the keystore, we append a keyed
 * hash with a bit of whitener. Seeds saved_ctx[idx] with
 * SHA1(password-as-16-bit-bytes || "Mighty Aphrodite"); crypt_all then
 * appends the keystore data and finalizes. */
static inline void getPreKeyedHash(int idx)
{
	int i, j;
	unsigned char passwdBytes[PLAINTEXT_LENGTH * 2];
	const char *magic = "Mighty Aphrodite";
	char *password = saved_key[idx];
	SHA_CTX *ctxp = &saved_ctx[idx];

	// NOTE(review): strlen(password) is re-evaluated every iteration;
	// saved_len[idx] already holds the same value.
	for (i=0, j=0; i < strlen(password); i++) {
		// should this be proper LE UTF16 encoded??? NOPE. We now have
		// a utf-8 encoded test hash, and the below method works.
		// actually tried utf8_to_utf16_be, and the ascii passwords
		// work fine, but the utf8 hash FAILS.
		//passwdBytes[j++] = (password[i] >> 8);
		passwdBytes[j++] = 0;
		passwdBytes[j++] = password[i];
	}
	SHA1_Init(ctxp);
	SHA1_Update(ctxp, passwdBytes, saved_len[idx] * 2);
	SHA1_Update(ctxp, magic, 16); /* "Mighty Aphrodite" is exactly 16 bytes */
}

/* allocate per-candidate buffers, scaled for OpenMP/SIMD */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#elif SIMD_COEF_32
	self->params.max_keys_per_crypt *= OMP_SCALE;
#endif
	// we need 1 more saved_key than is 'used'. This extra key is used
	// in SIMD code, for all part full grouped blocks.
	saved_key = mem_calloc(sizeof(*saved_key),
		self->params.max_keys_per_crypt + 1);
	saved_len = mem_calloc(sizeof(*saved_len),
		self->params.max_keys_per_crypt + 1);
	crypt_out = mem_calloc(sizeof(*crypt_out),
		self->params.max_keys_per_crypt);
	saved_ctx = mem_calloc(sizeof(*saved_ctx),
		self->params.max_keys_per_crypt);
	MixOrderLen = self->params.max_keys_per_crypt*MAX_KEYS_PER_CRYPT+MAX_KEYS_PER_CRYPT;
	MixOrder = mem_calloc(MixOrderLen, sizeof(int));
}

/* release everything init()/link_salt() allocated (preload nodes themselves
 * come from mem_alloc_tiny and are owned by the tiny pool) */
static void done(void)
{
	MEM_FREE(MixOrder);
	MEM_FREE(saved_ctx);
	MEM_FREE(crypt_out);
	MEM_FREE(saved_len);
	MEM_FREE(saved_key);
#ifdef SIMD_COEF_32
	// NOTE(review): first_blk/ex_data were allocated with mem_calloc_align;
	// confirm MEM_FREE is the correct release for aligned allocations here.
	while (salt_preload) {
		int i;
		for (i = 20; i >= 0; --i)
			MEM_FREE(salt_preload->ex_data[i]);
		MEM_FREE(salt_preload->first_blk);
		salt_preload = salt_preload->next;
	}
#endif
}

#ifdef SIMD_COEF_32
/* Pre-build the SIMD SHA1 input blocks for this salt, once per unique data
 * blob (salts are matched by data_hash).  first_blk holds the first 64-byte
 * block per thread and per password length 4..24; ex_data holds the shared
 * read-only continuation blocks (keystore data + SHA1 padding/bit length). */
static void link_salt(keystore_salt *ps)
{
	const unsigned char *magic = (const unsigned char*)"Mighty Aphrodite";
	const unsigned char *cpm;
	unsigned char *cpo;
	int threads=1;
	int j,k,t,idx;
	preload *p = salt_preload;

#ifdef _OPENMP
	threads = omp_get_max_threads();
#endif
	// make sure this salt was not already loaded. IF it is loaded, then
	// adjust the pointer in the salt-db record.
	while (p) {
		if (!memcmp(p->data_hash, ps->data_hash, 20)) {
			ps->ptr = p;
			return;
		}
		p = p->next;
	}
	p = (preload *)mem_alloc_tiny(sizeof(preload), 16);
	memset(p, 0, sizeof(preload));
	memcpy(p->data_hash, ps->data_hash, 20);
	p->first_blk = mem_calloc_align(threads, sizeof(*p->first_blk), MEM_ALIGN_SIMD);
	salt_mem_total += threads*sizeof(*p->first_blk);
	for (t = 0; t < threads; ++t) {
		// t is threads
		for (j = 0; j < 21; ++j) {
			// j is length-4 of candidate password
			// actual length of this full string to SHA1.
			unsigned bits, len = (j+4)*2+16+ps->data_length;
			cpo = (unsigned char*)p->first_blk[t][j];
			for (idx = 0; idx < NBKEYS; ++idx) {
				/* magic starts right after the UTF16-expanded password */
				cpm = magic;
				for (k = (j+4)*2; *cpm; ++k) {
					cpo[GETPOS(k, idx)] = *cpm++;
				}
				/* fill the rest of block 0 with the start of the data */
				cpm = ps->data;
				while (k < 64) {
					cpo[GETPOS(k, idx)] = *cpm++;
					++k;
				}
			}
			if (t==0) {
				// we only add 1 instance of the ex_data. for each
				// password length, since this data is read only.
				// All threads can share it.
				p->ex_data[j] = mem_calloc_align((len+8)/64+1, 64*NBKEYS, MEM_ALIGN_SIMD);
				salt_mem_total += ((len+8)/64+1)*64*NBKEYS;
				for (idx = 0; idx < NBKEYS; ++idx) {
					int x, z=64-((j+4)*2+16), x_full=0;
					cpm = ps->data;
					cpm += z; /* skip the bytes already placed in block 0 */
					cpo = (unsigned char*)p->ex_data[j];
					for (x=0; x+z < ps->data_length; ++x) {
						cpo[GETPOS(x, idx)] = *cpm++;
						if (x == 63) {
							/* advance to the next interleaved 64-byte block */
							x -= 64;
							cpo += 64*NBKEYS;
							z += 64;
							x_full += 64;
						}
					}
					cpo[GETPOS(x, idx)] = 0x80; /* SHA1 padding byte */
					x += x_full;
					p->n_ex[j] = x/64+1;
					if (x%64 > 55) {
						/* no room for the 8-byte length; one more block */
						++p->n_ex[j];
						cpo += 64*NBKEYS;
					}
					// now put bit length;
					bits = len<<3;
					x = 63;
					while (bits) {
						cpo[GETPOS(x, idx)] = bits&0xFF;
						bits >>= 8;
						--x;
					}
				}
			}
		}
	}
	// link this preload record into our list.
	p->next = salt_preload;
	salt_preload = p;
	// Adjust salt record.
	ps->ptr = p;
}
#endif

/* parse one $keystore$... ciphertext into a dyna_salt keystore_salt */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	int i;
	SHA_CTX ctx;
	static void *ptr;
	keystore_salt cs;

	memset(&cs, 0, sizeof(keystore_salt));
	ctcopy += 10; /* skip over "$keystore$" */
	p = strtokm(ctcopy, "$");
	cs.target = atoi(p);
	p = strtokm(NULL, "$");
	cs.data_length = atoi(p);
	p = strtokm(NULL, "$");
	cs.data = mem_alloc_tiny(cs.data_length, 1);
	for (i = 0; i < cs.data_length; i++) {
		cs.data[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	}
	// used as a way to later compare salts. It is ALSO the
	// hash for a 0 byte password for this salt.
	SHA1_Init(&ctx);
	SHA1_Update(&ctx, "Mighty Aphrodite", 16);
	SHA1_Update(&ctx, cs.data, cs.data_length);
	SHA1_Final(cs.data_hash, &ctx);
#ifdef SIMD_COEF_32
	link_salt(&cs);
#endif
	p = strtokm(NULL, "$"); /* skip hash */
	p = strtokm(NULL, "$");
	cs.count = atoi(p);
	p = strtokm(NULL, "$");
	cs.keysize = atoi(p);
	cs.keydata = mem_alloc_tiny(cs.keysize, 1);
	for (i = 0; i < cs.keysize; i++)
		cs.keydata[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	// setup the dyna_salt stuff.
	cs.dsalt.salt_cmp_offset = SALT_CMP_OFF(keystore_salt, data_length);
	cs.dsalt.salt_cmp_size = SALT_CMP_SIZE(keystore_salt, data_length, data, 0);
	cs.dsalt.salt_alloc_needs_free = 0;

	ptr = mem_alloc_tiny(sizeof(keystore_salt), MEM_ALIGN_WORD);
	memcpy(ptr, &cs, sizeof(keystore_salt));
	return (void *) &ptr;
}

/* standard JtR hash-bucket accessors over the first 32 bits of the digest */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

static void set_salt(void *salt)
{
	keystore_cur_salt = *(keystore_salt **) salt;
#ifdef SIMD_COEF_32
	cursimd = (preload*)keystore_cur_salt->ptr;
#endif
}

/* hash all pending candidates against the current salt; SIMD lanes are
 * grouped by password length (4..24), everything else goes through the
 * one-at-a-time OpenSSL CTX path */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index, tot_todo;

#ifdef SIMD_COEF_32
	// in SIMD code, we need to sort by password length. NOTE, 0-3 and +24
	// byte passwords 'all' group into the final group. Those are run 1 at
	// a time through CTX based code.
	int j, tot=0;
	tot_todo = 0;
	saved_len[count] = 0; // point all 'tail' MMX buffer elements to this location.
	for (j = 0; j < 21 && tot<count; ++j) {
		for (index = 0; index < count; ++index) {
			if (saved_len[index] == j+4) {
				MixOrder[tot_todo++] = index;
				++tot;
			}
		}
		/* pad the group to a full SIMD width with the dummy slot `count` */
		while (tot_todo % MAX_KEYS_PER_CRYPT)
			MixOrder[tot_todo++] = count;
	}
	if (tot < count) {
		// these do not get SIMD usage.
		for (index = 0; index < count; ++index) {
			if (saved_len[index] < 4 || saved_len[index] > 24) {
				MixOrder[tot_todo] = index;
				++tot;
				// we only want to do ONE password CTX mode
				// per loop through the thread.
				tot_todo += MAX_KEYS_PER_CRYPT;
			}
		}
	}
#else
	// no need to mix. just run them one after the next, in any order.
	for (index = 0; index < count; ++index)
		MixOrder[index] = index;
	tot_todo = count;
#endif

	index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < tot_todo; index += MAX_KEYS_PER_CRYPT) {
		SHA_CTX ctx;
#ifdef SIMD_COEF_32
		int x, tid=0, len, idx;
		char tmp_sse_out[20*MAX_KEYS_PER_CRYPT+MEM_ALIGN_SIMD];
		ARCH_WORD_32 *sse_out;
		sse_out = (ARCH_WORD_32 *)mem_align(tmp_sse_out, MEM_ALIGN_SIMD);
#ifdef _OPENMP
		tid = omp_get_thread_num();
#endif
		len = saved_len[MixOrder[index]];
		if (len >= 4 && len <= 24) {
			unsigned char *po;
			po = (unsigned char*)cursimd->first_blk[tid][len-4];
			/* splice each candidate's password bytes (as 00,b pairs) into
			   the prebuilt first block */
			for (x = 0; x < MAX_KEYS_PER_CRYPT; ++x) {
				int j;
				unsigned char *p;
				idx = MixOrder[index+x];
				p = (unsigned char*)saved_key[idx];
				for (j = 0; j < len; ++j)
					po[GETPOS(j*2+1,x)] = p[j];
			}
			SIMDSHA1body(po, sse_out, NULL, SSEi_MIXED_IN);
			po = (unsigned char*)cursimd->ex_data[len-4];
			for (x = 0; x < cursimd->n_ex[len-4]; ++x) {
				SIMDSHA1body(po, sse_out, sse_out, SSEi_MIXED_IN|SSEi_RELOAD);
				po += 64*MAX_KEYS_PER_CRYPT;
			}
#ifdef SIMD_COEF_32
			// we have to 'marshal' the data back into the SIMD output buf.
			// but we only marshal the first 4 bytes.
			for (x = 0; x < MAX_KEYS_PER_CRYPT; ++x) {
				idx = MixOrder[index+x];
				if (idx < count)
					crypt_out[idx][0] = JOHNSWAP(sse_out[5*SIMD_COEF_32*(x/SIMD_COEF_32)+x%SIMD_COEF_32]);
			}
#endif
			// we do NOT want to fall through. We handled this
			// SIMD block of data already.
			continue;
		}
#endif
		if (dirty)
			getPreKeyedHash(MixOrder[index]);
		if (saved_len[MixOrder[index]] == 0)
			/* empty password: data_hash IS the digest (see get_salt) */
			memcpy(crypt_out[MixOrder[index]], keystore_cur_salt->data_hash, 20);
		else {
			memcpy(&ctx, &saved_ctx[MixOrder[index]], sizeof(ctx));
			SHA1_Update(&ctx, keystore_cur_salt->data, keystore_cur_salt->data_length);
			SHA1_Final((unsigned char*)crypt_out[MixOrder[index]], &ctx);
		}
	}
	dirty = 0;
	return count;
}

static int cmp_all(void *binary, int count)
{
	int index = 0;
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (; index < count; index++)
#endif
		if (((ARCH_WORD_32*)binary)[0] == crypt_out[index][0])
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	if (((ARCH_WORD_32*)binary)[0] == crypt_out[index][0])
		return 1;
	return 0;
}

static int cmp_exact(char *source, int index)
{
	unsigned char *binary = (unsigned char *)keystore_common_get_binary(source);
#ifdef SIMD_COEF_32
	// in SIMD, we only have the first 4 bytes copied into the binary buffer
	// for a cmp_one, so we do a full CTX type check here.
	SHA_CTX ctx;
	getPreKeyedHash(index);
	memcpy(&ctx, &saved_ctx[index], sizeof(ctx));
	SHA1_Update(&ctx, keystore_cur_salt->data, keystore_cur_salt->data_length);
	SHA1_Final((unsigned char*)crypt_out[index], &ctx);
#endif
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

static void keystore_set_key(char *key, int index)
{
	/* assumes the JtR core never passes a key longer than PLAINTEXT_LENGTH
	   (standard format contract) — strcpy is unbounded here */
	saved_len[index] = strlen(key);
	strcpy(saved_key[index], key);
	dirty = 1;
}

static char *get_key(int index)
{
	return saved_key[index];
}

struct fmt_main fmt_keystore = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_DYNA_SALT,
		/* FIXME: report keystore_cur_salt->data_length as tunable cost? */
		{ NULL },
		keystore_common_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		keystore_common_valid_cpu,
		fmt_default_split,
		keystore_common_get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		keystore_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 16; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
pi-monte.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <time.h>
#include <math.h>

#define MOD 10000007
#define MULTIPLIER 1234
#define INCREMENT 12345

/* LCG state; threadprivate so each OpenMP thread runs its own stream. */
long long seed;
#pragma omp threadprivate(seed)
/* Leapfrog coefficients: with k threads, each thread advances its stream by
 * k steps at a time (multiplier^k and the matching increment mod MOD). */
long long leap_multiplier = MULTIPLIER, leap_increment = INCREMENT;

/* One leapfrogged LCG step: seed <- (a*seed + c) mod MOD. */
long lcg()
{
	seed = (leap_multiplier*seed + leap_increment)%MOD;
	return seed;
}

void set_pseudo_rand_seed(long p_seed)
{
	seed = p_seed;
}

/* Uniform pseudo-random double in [0, 2]. */
double pseudo_rand()
{
	long rand_num = lcg();
	//printf("%ld\n", rand_num);
	return 2.0*rand_num/(MOD-1);
}

/* Modular exponentiation: base^exp mod MOD by binary exponentiation. */
long long modexp(long long base, long long exp)
{
	long long ans = 1;
	while(exp) {
		if(exp % 2)
			ans = (ans * base) % MOD;
		base = (base * base) % MOD;
		exp = exp >> 1;
	}
	return ans;
}

/* Sequential Monte-Carlo estimate of pi: sample [0,2]^2 and count points
 * within distance r of the circle center (1,1). */
double seq_pimonte(long num_steps, int r)
{
	double px,py,d;
	double count = 0;

	set_pseudo_rand_seed(1234);
	for (int i = 0; i < num_steps; ++i) {
		px = pseudo_rand();
		py = pseudo_rand();
		//printf("%lf %lf\n", px,py);
		d = sqrt((px - 1)*(px - 1) + (py - 1)*(py - 1));
		if(d <= r) {
			count++;
		}
	}
	return 4.0*count/num_steps;
}

/* Parallel version: one LCG stream per thread via the leapfrog method, then
 * a reduction over the hit count. */
double parallel_pimonte(long num_steps, int r, int NUM_THREADS)
{
	double px,py,d,count=0;
	int nthreads,t_seeds[20];

	omp_set_num_threads(NUM_THREADS);
#pragma omp parallel private(px,py,d)
	{
#pragma omp single
		{
			/* derive one starting seed per thread, then switch the shared
			 * leap constants so every stream advances nthreads at a time */
			nthreads = omp_get_num_threads();
			t_seeds[0] = MOD/MULTIPLIER;
			for(int i=1;i<nthreads;++i)
				t_seeds[i] = ((MULTIPLIER*t_seeds[i-1] + MOD)%MOD + INCREMENT + MOD)%MOD;
			leap_multiplier = modexp(MULTIPLIER,nthreads);
			/* c_k = c*(a^k - 1)/(a - 1) mod MOD, via Fermat inverse */
			leap_increment = ((INCREMENT*(leap_multiplier - 1 + MOD)%MOD)*modexp(MULTIPLIER - 1,MOD - 2)+MOD)%MOD;
		}
		int id = omp_get_thread_num();
		set_pseudo_rand_seed(t_seeds[id]);
#pragma omp for reduction(+:count)
		for(int i=0;i<num_steps;++i) {
			px = pseudo_rand();
			py = pseudo_rand();
			d = sqrt((px - 1)*(px - 1) + (py - 1)*(py - 1));
			if(d <= r) {
				count = count + 1;
			}
		}
	}
	return 4.0*count/num_steps;
}

int main()
{
	long num_steps = 10000000;
	double time_taken_seq,time_taken_parallel;
	double pi;

	time_taken_seq = omp_get_wtime();
	pi = seq_pimonte(num_steps,1);
	time_taken_seq = omp_get_wtime() - time_taken_seq;
	printf("Sequential program Pi : %lf \n", pi);

	printf("Parallel Calculation\n");
	int NUM_THREADS = 2;
	while(NUM_THREADS<=20) {
		time_taken_parallel = omp_get_wtime();
		pi = parallel_pimonte(num_steps,1,NUM_THREADS);
		time_taken_parallel = omp_get_wtime() - time_taken_parallel;
		/* FIX: speedup is T_seq / T_parallel; the original printed the
		 * inverse ratio under the "Speedup" label. */
		printf("Pi : %lf \t Speedup: %lf \t Threads : %d\n",
		       pi, time_taken_seq/time_taken_parallel, NUM_THREADS);
		NUM_THREADS++;
	}
	return 0;
}
mandelbrot.c
#include <omp.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <gmp.h>

/* Mandelbrot renderer: arbitrary-precision (GMP mpf) escape-time iteration,
 * rectangle border-tracing fill, OpenMP-parallel over screen tiles,
 * PPM (P3) output on stdout. */
#define real float
#define NO_COLOR -1
#define DEBUG_COLOR -1
/* NOTE(review): NO_COLOR and DEBUG_COLOR share the value -1, so the PPM
 * writer cannot tell "never computed" from "debug-filled" — presumably
 * intentional when debug=1; confirm. */

static int precision = 32;      /* mpf mantissa bits */
static int maxiter = 30;
static int w = 800;             /* image width in pixels */
static int h = 600;             /* image height in pixels */
static mpf_t x_b;               /* left edge of the complex window */
static mpf_t y_b;               /* bottom edge of the complex window */
static mpf_t step;              /* complex-plane size of one pixel */
static real *iterData = 0;      /* w*h iteration counts; NO_COLOR = unset */
static int debug = 0;           /* 1: paint solid-filled interiors DEBUG_COLOR */
static int gridsize = 10;       /* tiles per row for the parallel pass */

/* Escape-time iteration for pixel (x0_int, y0_int); returns the cached value
 * when the pixel was already computed.  All arithmetic is GMP mpf at
 * `precision` bits; escape test is |z|^2 >= 4. */
static real iterate_point(int x0_int, int y0_int)
{
	real val = iterData[y0_int * w + x0_int];
	if(val != NO_COLOR) {
		return val;   /* memoized */
	}

	mpf_t x0;
	mpf_t y0;
	mpf_t valsq;
	mpf_t x;
	mpf_t y;
	mpf_t xt;
	mpf_t xs;
	mpf_t ys;
	mpf_t tmp;
	mpf_init2(x0, precision);
	mpf_init2(y0, precision);
	mpf_init2(valsq, precision);
	mpf_init2(x, precision);
	mpf_init2(y, precision);
	mpf_init2(xt, precision);
	mpf_init2(xs, precision);
	mpf_init2(ys, precision);
	mpf_init2(tmp, precision);

	/* map pixel coordinates into the complex plane: c = base + step*pixel */
	mpf_mul_ui(x0, step, x0_int);
	mpf_add(x0, x0, x_b);
	mpf_mul_ui(y0, step, y0_int);
	mpf_add(y0, y0, y_b);

	int iter = 0;
	mpf_set(x, x0);
	mpf_set(y, y0);
	mpf_mul(xs, x, x);
	mpf_mul(ys, y, y);
	mpf_add(valsq, xs, ys);
	while((mpf_cmp_d(valsq, 4.0) < 0) && (iter < maxiter)) {
		/* z <- z^2 + c, keeping x^2 and y^2 cached in xs/ys */
		mpf_sub(xt, xs, ys);
		mpf_add(xt, xt, x0);
		mpf_mul(tmp, x, y);
		mpf_mul_2exp(y, tmp, 1);   /* y = 2*x*y */
		mpf_add(y, y, y0);
		mpf_set(x, xt);
		mpf_mul(xs, x, x);
		mpf_mul(ys, y, y);
		mpf_add(valsq, xs, ys);
		++iter;
	}
	//double r_valsq = mpf_get_d(valsq);

	mpf_clear(x0);
	mpf_clear(y0);
	mpf_clear(valsq);
	mpf_clear(x);
	mpf_clear(y);
	mpf_clear(xt);
	mpf_clear(xs);
	mpf_clear(ys);
	mpf_clear(tmp);

	return (real)iter;
	//return (iter - log2(log2(r_valsq) * 0.5));
}

/* Exact float comparison is deliberate: values are integer iteration counts
 * stored in floats. */
static int isNotEqualColor(real a, real b)
{
	return a != b;
}

/* Border tracing over the inclusive rectangle [xb,xe]x[yb,ye]: compute the
 * border; if all border pixels have the same value, flood the interior with
 * it (Mandelbrot connectedness argument), else split the longer axis and
 * recurse.  NOTE(review): adjacent tiles share border pixels, so parallel
 * callers may write the same iterData cells from two threads — the values
 * written are identical, but it is formally a data race; confirm acceptable. */
static void fillRekt(int xb, int xe, int yb, int ye)
{
	int dx = xe-xb;
	int dy = ye-yb;
	//printf("%d %d\n", xb, dx);
	if((dy <= 1) || (dx <= 1)) {
		/* degenerate rectangle: compute every pixel directly */
		for(int i = yb; i <= ye; ++i) {
			for(int k = xb; k <= xe; ++k) {
				real val = iterData[i * w + k];
				if(val == NO_COLOR) {
					val = iterate_point(k, i);
					iterData[i * w + k] = val;
				}
			}
		}
		return;
	}

	real origVal = NO_COLOR;
	int same = 1;

	/* top edge */
	for(int i = xb; i <= xe; ++i) {
		real val = iterate_point(i, yb);
		iterData[yb * w + i] = val;
		if(origVal == NO_COLOR) {
			origVal = val;
		}
		else if(isNotEqualColor(origVal, val)) {
			same = 0;
		}
	}
	/* bottom edge */
	for(int i = xb; i <= xe; ++i) {
		real val = iterate_point(i, ye);
		iterData[ye * w + i] = val;
		if(origVal == NO_COLOR) {
			origVal = val;
		}
		else if(isNotEqualColor(origVal, val)) {
			same = 0;
		}
	}
	/* left edge */
	for(int i = yb; i <= ye; ++i) {
		real val = iterate_point(xb, i);
		iterData[i * w + xb] = val;
		if(origVal == NO_COLOR) {
			origVal = val;
		}
		else if(isNotEqualColor(origVal, val)) {
			same = 0;
		}
	}
	/* right edge */
	for(int i = yb; i <= ye; ++i) {
		real val = iterate_point(xe, i);
		iterData[i * w + xe] = val;
		if(origVal == NO_COLOR) {
			origVal = val;
		}
		else if(isNotEqualColor(origVal, val)) {
			same = 0;
		}
	}

	if(same) {
		/* uniform border: flood the interior without iterating it */
		for(int i = yb+1; (i <= (ye-1)) && (i < h); ++i) {
			for(int k = xb+1; (k <= (xe-1)) && (k < w); ++k) {
				if(debug) {
					iterData[i * w + k] = DEBUG_COLOR;
				}
				else {
					iterData[i * w + k] = origVal;
				}
			}
		}
		return;
	}

	/* mixed border: bisect along the longer axis and recurse */
	if(dx > dy) {
		int midx = (xb + xe) / 2;
		fillRekt(xb, midx, yb, ye);
		fillRekt(midx, xe, yb, ye);
	}
	else {
		int midy = (yb + ye) / 2;
		fillRekt(xb, xe, yb, midy);
		fillRekt(xb, xe, midy, ye);
	}
}

static int min(int a, int b)
{
	return a < b ? a : b;
}

/* argv[1] (optional): debug flag.  Renders a fixed 1920x1080 window of the
 * plane starting at (-2.5, -1.125) and writes an ASCII PPM to stdout. */
int main(int argc, char **argv)
{
	precision = 32;
	maxiter = 100;
	w = 1920;
	h = 1080;
	debug = 1;
	if(argc > 1) {
		debug = atoi(argv[1]);
	}
	gridsize = 16;

	mpf_init2(step, precision);
	mpf_init2(x_b, precision);
	mpf_init2(y_b, precision);
	mpf_set_d(step, 4.0 / w);     /* window is 4 units wide in the plane */
	mpf_set_d(x_b, -2.5);
	mpf_set_d(y_b, -1.125);

	iterData = malloc(sizeof(real) * w * h);
	for(int i = 0; i < w*h; ++i) {
		iterData[i] = NO_COLOR;
	}

	/*
	//#pragma omp parallel for schedule(dynamic, 1)
	for(int wy = 0; wy < h; ++wy) {
		for(int wx = 0; wx < w; ++wx) {
			real iter = iterate_point(wx, wy);
			iterData[wy * w + wx] = iter;
		}
	}
	*/

	//fillRekt(0, w/2, 0, h-1);
	//fillRekt(w/2, w-1, 0, h-1);

	/* decompose the image into gridsize-per-row square tiles; hdirec rounds
	 * the tile-row count up so the bottom strip is covered */
	const int istep = w/gridsize;
	const int hdirec = h/istep + 1;
	const int all = hdirec * gridsize;

#pragma omp parallel for schedule(dynamic, 1)
	for(int i = 0; i < all; ++i) {
		int cubex = i % gridsize;
		int cubey = i / gridsize;
		int beginx = cubex * istep;
		int beginy = cubey * istep;
		int endx = min(beginx + istep, w-1);
		int endy = min(beginy + istep, h-1);
		fillRekt(beginx, endx, beginy, endy);
	}

	//test output in *.ppm
	printf("P3\n%d\n%d\n%d\n", w, h, 255);
	for(int wy = 0; wy < h; ++wy) {
		for(int wx = 0; wx < w; ++wx) {
			real val = iterData[wy * w + wx];
			if(val == DEBUG_COLOR) {
				printf("%d %d %d\n", 0, 128, 64);
				continue;
			}
			/* grayscale: high iteration count -> dark */
			int colVal = (int)((1.0 - (val/maxiter)) * 255.0);
			if(colVal > 255) colVal = 255;
			if(colVal < 0) colVal = 0;
			printf("%d %d %d\n", colVal, colVal, colVal);
		}
	}

	mpf_clear(x_b);
	mpf_clear(y_b);
	mpf_clear(step);
	free(iterData);
	return 0;
}
argmax.h
// Copyright 2018 Xiaomi, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef MACE_KERNELS_ARGMAX_H_ #define MACE_KERNELS_ARGMAX_H_ #include <algorithm> #include <functional> #include <limits> #include <memory> #include <vector> #include "mace/core/future.h" #include "mace/core/tensor.h" #include "mace/public/mace.h" #include "mace/utils/utils.h" namespace mace { namespace kernels { template <DeviceType D, typename T> struct ArgMaxFunctor { MaceStatus operator()(const Tensor *input, const Tensor *axis, Tensor *output, StatsFuture *future) { MACE_UNUSED(future); MACE_CHECK(input->dim_size() > 0, "ArgMax input should not be a scalar"); MACE_CHECK(axis->dim_size() == 0, "Mace argmax only supports scalar axis"); Tensor::MappingGuard axis_guard(axis); int axis_value = axis->data<int32_t>()[0]; if (axis_value < 0) { axis_value += input->dim_size(); } MACE_CHECK(axis_value == input->dim_size() - 1, "Mace argmax only supports last dimension as axis"); std::vector<index_t> output_shape(input->dim_size() - 1); for (index_t d = 0; d < input->dim_size() - 1; ++d) { output_shape[d] = input->dim(d < axis_value ? 
d : d + 1); } MACE_RETURN_IF_ERROR(output->Resize(output_shape)); Tensor::MappingGuard input_guard(input); Tensor::MappingGuard output_guard(output); auto input_data = input->data<T>(); auto output_data = output->mutable_data<int32_t>(); index_t outer_size = output->size(); index_t inner_size = input->dim(axis_value); #pragma omp parallel for for (index_t i = 0; i < outer_size; ++i) { int idx = 0; T max_value = std::numeric_limits<T>::lowest(); const T *input_ptr = input_data + i * inner_size; for (index_t j = 0; j < inner_size; ++j) { if (input_ptr[j] > max_value) { max_value = input_ptr[j]; idx = j; } } output_data[i] = idx; } return MACE_SUCCESS; } }; } // namespace kernels } // namespace mace #endif // MACE_KERNELS_ARGMAX_H_
ft.c
/*-------------------------------------------------------------------- NAS Parallel Benchmarks 3.0 structured OpenMP C versions - FT This benchmark is an OpenMP C version of the NPB FT code. The OpenMP C 2.3 versions are derived by RWCP from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. 3.0 translation is performed by the UVSQ. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Information on OpenMP activities at RWCP is available at: http://pdplab.trc.rwcp.or.jp/pdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http://www.nas.nasa.gov/NAS/NPB/ --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- Authors: D. Bailey W. Saphir OpenMP C version: S. Satoh 3.0 structure translation: M. Popov --------------------------------------------------------------------*/ #include "../common/npb-C.h" /* global variables */ #include "global.h" /* function declarations */ static void evolve(dcomplex u0[NZ][NY][NX], dcomplex u1[NZ][NY][NX], int t, int indexmap[NZ][NY][NX], int d[3]); static void compute_initial_conditions(dcomplex u0[NZ][NY][NX], int d[3]); static void ipow46(double a, int exponent, double *result); static void setup(void); static void compute_indexmap(int indexmap[NZ][NY][NX], int d[3]); static void print_timers(void); static void fft(int dir, dcomplex x1[NZ][NY][NX], dcomplex x2[NZ][NY][NX]); static void cffts1(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]); static void cffts2(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]); static void cffts3(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], 
dcomplex y1[NX][FFTBLOCKPAD]); static void fft_init (int n); static void cfftz (int is, int m, int n, dcomplex x[NX][FFTBLOCKPAD], dcomplex y[NX][FFTBLOCKPAD]); static void fftz2 (int is, int l, int m, int n, int ny, int ny1, dcomplex u[NX], dcomplex x[NX][FFTBLOCKPAD], dcomplex y[NX][FFTBLOCKPAD]); static int ilog2(int n); static void checksum(int i, dcomplex u1[NZ][NY][NX], int d[3]); static void verify (int d1, int d2, int d3, int nt, boolean *verified, char *class); /*-------------------------------------------------------------------- c FT benchmark c-------------------------------------------------------------------*/ int main(int argc, char **argv) { /*c------------------------------------------------------------------- c-------------------------------------------------------------------*/ int i, ierr; /*------------------------------------------------------------------ c u0, u1, u2 are the main arrays in the problem. c Depending on the decomposition, these arrays will have different c dimensions. To accomodate all possibilities, we allocate them as c one-dimensional arrays and pass them to subroutines for different c views c - u0 contains the initial (transformed) initial condition c - u1 and u2 are working arrays c - indexmap maps i,j,k of u0 to the correct i^2+j^2+k^2 for the c time evolution operator. c-----------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Large arrays are in common so that they are allocated on the c heap rather than the stack. This common block is not c referenced directly anywhere else. Padding is to avoid accidental c cache problems, since all array sizes are powers of two. 
c-------------------------------------------------------------------*/ static dcomplex u0[NZ][NY][NX]; static dcomplex pad1[3]; static dcomplex u1[NZ][NY][NX]; static dcomplex pad2[3]; static dcomplex u2[NZ][NY][NX]; static dcomplex pad3[3]; static int indexmap[NZ][NY][NX]; int iter; int nthreads = 1; double total_time, mflops; boolean verified; char class; /*-------------------------------------------------------------------- c Run the entire problem once to make sure all data is touched. c This reduces variable startup costs, which is important for such a c short benchmark. The other NPB 2 implementations are similar. c-------------------------------------------------------------------*/ for (i = 0; i < T_MAX; i++) { timer_clear(i); } setup(); compute_indexmap(indexmap, dims[2]); compute_initial_conditions(u1, dims[0]); fft_init (dims[0][0]); fft(1, u1, u0); /*-------------------------------------------------------------------- c Start over from the beginning. Note that all operations must c be timed, in contrast to other benchmarks. 
c-------------------------------------------------------------------*/ for (i = 0; i < T_MAX; i++) { timer_clear(i); } timer_start(T_TOTAL); if (TIMERS_ENABLED == TRUE) timer_start(T_SETUP); compute_indexmap(indexmap, dims[2]); compute_initial_conditions(u1, dims[0]); fft_init (dims[0][0]); if (TIMERS_ENABLED == TRUE) { timer_stop(T_SETUP); } if (TIMERS_ENABLED == TRUE) { timer_start(T_FFT); } fft(1, u1, u0); if (TIMERS_ENABLED == TRUE) { timer_stop(T_FFT); } for (iter = 1; iter <= niter; iter++) { if (TIMERS_ENABLED == TRUE) { timer_start(T_EVOLVE); } evolve(u0, u1, iter, indexmap, dims[0]); if (TIMERS_ENABLED == TRUE) { timer_stop(T_EVOLVE); } if (TIMERS_ENABLED == TRUE) { timer_start(T_FFT); } fft(-1, u1, u2); if (TIMERS_ENABLED == TRUE) { timer_stop(T_FFT); } if (TIMERS_ENABLED == TRUE) { timer_start(T_CHECKSUM); } checksum(iter, u2, dims[0]); if (TIMERS_ENABLED == TRUE) { timer_stop(T_CHECKSUM); } } verify(NX, NY, NZ, niter, &verified, &class); { #if defined(_OPENMP) nthreads = omp_get_num_threads(); #endif /* _OPENMP */ } /* end parallel */ timer_stop(T_TOTAL); total_time = timer_read(T_TOTAL); if( total_time != 0.0) { mflops = 1.0e-6*(double)(NTOTAL) * (14.8157+7.19641*log((double)(NTOTAL)) + (5.23518+7.21113*log((double)(NTOTAL)))*niter) /total_time; } else { mflops = 0.0; } c_print_results("FT", class, NX, NY, NZ, niter, nthreads, total_time, mflops, " floating point", verified, NPBVERSION, COMPILETIME, CS1, CS2, CS3, CS4, CS5, CS6, CS7); if (TIMERS_ENABLED == TRUE) print_timers(); } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void evolve(dcomplex u0[NZ][NY][NX], dcomplex u1[NZ][NY][NX], int t, int indexmap[NZ][NY][NX], int d[3]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c evolve u0 
-> u1 (t time steps) in fourier space c-------------------------------------------------------------------*/ int i, j, k; #pragma omp parallel for private(i ,j ,k ) for (k = 0; k < d[2]; k++) { for (j = 0; j < d[1]; j++) { for (i = 0; i < d[0]; i++) { crmul(u1[k][j][i], u0[k][j][i], ex[t*indexmap[k][j][i]]); } } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void compute_initial_conditions(dcomplex u0[NZ][NY][NX], int d[3]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Fill in array u0 with initial conditions from c random number generator c-------------------------------------------------------------------*/ int k; double x0, start, an, dummy; static double tmp[NX*2*MAXDIM+1]; int i,j,t; start = SEED; /*-------------------------------------------------------------------- c Jump to the starting element for our first plane. c-------------------------------------------------------------------*/ ipow46(A, (zstart[0]-1)*2*NX*NY + (ystart[0]-1)*2*NX, &an); dummy = randlc(&start, an); ipow46(A, 2*NX*NY, &an); /*-------------------------------------------------------------------- c Go through by z planes filling in one square at a time. 
c-------------------------------------------------------------------*/
    /* Fill one z-plane per iteration: each plane consumes 2*NX*dims[0][1]
       pseudo-random numbers from vranlc, interleaved in tmp as
       (real, imag) pairs. */
    for (k = 0; k < dims[0][2]; k++) {
        x0 = start;
        vranlc(2*NX*dims[0][1], &x0, A, tmp);
        t = 1;
        for (j = 0; j < dims[0][1]; j++)
            for (i = 0; i < NX; i++) {
                u0[k][j][i].real = tmp[t++];
                u0[k][j][i].imag = tmp[t++];
            }
        /* Advance the seed to the start of the next plane.
           NOTE(review): k runs over 0..dims[0][2]-1, so this guard is
           always true; it looks like a leftover from the 1-based Fortran
           original.  The extra advance on the last plane appears harmless
           because start is not read afterwards -- confirm. */
        if (k != dims[0][2]) dummy = randlc(&start, an);
    }
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

/* Compute a^exponent mod 2^46 into *result, via binary exponentiation
   carried out with the benchmark's 46-bit linear congruential helper. */
static void ipow46(double a, int exponent, double *result) {

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c compute a^exponent mod 2^46
c-------------------------------------------------------------------*/

    double dummy, q, r;
    int n, n2;

/*--------------------------------------------------------------------
c Use
c   a^n = a^(n/2)*a^(n/2) if n even else
c   a^n = a*a^(n-1)       if n odd
c-------------------------------------------------------------------*/

    *result = 1;
    if (exponent == 0) return;
    q = a;
    r = 1;
    n = exponent;

    /* randlc(&q, q) squares q mod 2^46; randlc(&r, q) multiplies r by q
       mod 2^46.  The function's return value is deliberately unused. */
    while (n > 1) {
        n2 = n/2;
        if (n2 * 2 == n) {
            dummy = randlc(&q, q);
            n = n2;
        } else {
            dummy = randlc(&r, q);
            n = n-1;
        }
    }
    dummy = randlc(&r, q);
    *result = r;
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

/* One-time benchmark setup: print the banner, set iteration count,
   initialize the per-dimension layout arrays and FFT blocking factors. */
static void setup(void) {

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

    int ierr, i, j, fstatus;

    printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version"
           " - FT Benchmark\n\n");

    niter = NITER_DEFAULT;

    printf(" Size : %3dx%3dx%3d\n", NX, NY, NZ);
    printf(" Iterations : %7d\n", niter);

/* 1004 format(' Number of processes : ', i7)
 1005 format(' Processor array : ', i3, 'x', i3)
 1006 format(' WARNING: compiled for ', i5, '
processes. ', > ' Will not verify. ')*/ #pragma omp parallel for firstprivate(i ) for (i = 0;i < 3 ; i++) { dims[i][0] = NX; dims[i][1] = NY; dims[i][2] = NZ; } #pragma omp parallel for firstprivate(i ) for (i = 0; i < 3; i++) { xstart[i] = 1; xend[i] = NX; ystart[i] = 1; yend[i] = NY; zstart[i] = 1; zend[i] = NZ; } /*-------------------------------------------------------------------- c Set up info for blocking of ffts and transposes. This improves c performance on cache-based systems. Blocking involves c working on a chunk of the problem at a time, taking chunks c along the first, second, or third dimension. c c - In cffts1 blocking is on 2nd dimension (with fft on 1st dim) c - In cffts2/3 blocking is on 1st dimension (with fft on 2nd and 3rd dims) c Since 1st dim is always in processor, we'll assume it's long enough c (default blocking factor is 16 so min size for 1st dim is 16) c The only case we have to worry about is cffts1 in a 2d decomposition. c so the blocking factor should not be larger than the 2nd dimension. c-------------------------------------------------------------------*/ fftblock = FFTBLOCK_DEFAULT; fftblockpad = FFTBLOCKPAD_DEFAULT; if (fftblock != FFTBLOCK_DEFAULT) fftblockpad = fftblock+3; } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void compute_indexmap(int indexmap[NZ][NY][NX], int d[3]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c compute function from local (i,j,k) to ibar^2+jbar^2+kbar^2 c for time evolution exponent. 
c-------------------------------------------------------------------*/ int i, j, k, ii, ii2, jj, ij2, kk; double ap; /*-------------------------------------------------------------------- c basically we want to convert the fortran indices c 1 2 3 4 5 6 7 8 c to c 0 1 2 3 -4 -3 -2 -1 c The following magic formula does the trick: c mod(i-1+n/2, n) - n/2 c-------------------------------------------------------------------*/ #pragma omp parallel for private(i ,j ,k ,ii ,ii2 ,jj ,ij2 ,kk ) for (i = 0; i < dims[2][0]; i++) { ii = (i+1+xstart[2]-2+NX/2)%NX - NX/2; ii2 = ii*ii; #pragma omp parallel for firstprivate(k ,j ,ii ,ii2 ,jj ,ij2 ,kk ,indexmap ,i ) for (j = 0; j < dims[2][1]; j++) { jj = (j+1+ystart[2]-2+NY/2)%NY - NY/2; ij2 = jj*jj+ii2; #pragma omp parallel for firstprivate(k ,j ,ii ,ii2 ,jj ,ij2 ,kk ,indexmap ,i ) for (k = 0; k < dims[2][2]; k++) { kk = (k+1+zstart[2]-2+NZ/2)%NZ - NZ/2; indexmap[k][j][i] = kk*kk+ij2; } } } /*-------------------------------------------------------------------- c compute array of exponentials for time evolution. 
c-------------------------------------------------------------------*/ ap = - 4.0 * ALPHA * PI * PI; ex[0] = 1.0; ex[1] = exp(ap); for (i = 2; i <= EXPMAX; i++) { ex[i] = ex[i-1]*ex[1]; } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void print_timers(void) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int i; char *tstrings[] = { " total ", " setup ", " fft ", " evolve ", " checksum ", " fftlow ", " fftcopy " }; for (i = 0; i < T_MAX; i++) { if (timer_read(i) != 0.0) { printf("timer %2d(%16s( :%10.6f\n", i, tstrings[i], timer_read(i)); } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void fft(int dir, dcomplex x1[NZ][NY][NX], dcomplex x2[NZ][NY][NX]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ dcomplex y0[NX][FFTBLOCKPAD]; dcomplex y1[NX][FFTBLOCKPAD]; /*-------------------------------------------------------------------- c note: args x1, x2 must be different arrays c note: args for cfftsx are (direction, layout, xin, xout, scratch) c xin/xout may be the same and it can be somewhat faster c if they are c-------------------------------------------------------------------*/ if (dir == 1) { cffts1(1, dims[0], x1, x1, y0, y1); /* x1 -> x1 */ cffts2(1, dims[1], x1, x1, y0, y1); /* x1 -> x1 */ cffts3(1, dims[2], x1, x2, y0, y1); /* x1 -> x2 */ } else { cffts3(-1, dims[2], x1, x1, y0, y1); /* x1 -> x1 */ cffts2(-1, dims[1], x1, x1, y0, y1); /* x1 -> x1 */ cffts1(-1, dims[0], x1, x2, y0, y1); /* x1 -> x2 */ } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void 
cffts1(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int logd[3]; int i, j, k, jj; #pragma omp parallel for firstprivate(d ,i ) for (i = 0; i < 3; i++) { logd[i] = ilog2(d[i]); } { dcomplex y0[NX][FFTBLOCKPAD]; dcomplex y1[NX][FFTBLOCKPAD]; #pragma omp parallel for private(i ,j ,k ,jj ) for (k = 0; k < d[2]; k++) { for (jj = 0; jj <= d[1] - fftblock; jj+=fftblock) { /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ #pragma omp parallel for firstprivate(fftblock ,i ,jj ,x ,j ,k ) for (j = 0; j < fftblock; j++) { #pragma omp parallel for firstprivate(fftblock ,i ,jj ,x ,j ,k ) for (i = 0; i < d[0]; i++) { y0[i][j].real = x[k][j+jj][i].real; y0[i][j].imag = x[k][j+jj][i].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */ cfftz (is, logd[0], d[0], y0, y1); /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ #pragma omp parallel for firstprivate(fftblock ,i ,jj ,x ,j ,k ) for (j = 0; j < fftblock; j++) { for (i = 0; i < d[0]; i++) { xout[k][j+jj][i].real = y0[i][j].real; xout[k][j+jj][i].imag = y0[i][j].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ } } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void cffts2(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int logd[3]; int i, j, k, ii; #pragma omp parallel for firstprivate(d ,i ) for (i = 0; i < 3; i++) { 
logd[i] = ilog2(d[i]); } { dcomplex y0[NX][FFTBLOCKPAD]; dcomplex y1[NX][FFTBLOCKPAD]; #pragma omp parallel for private(i ,j ,k ,ii ) for (k = 0; k < d[2]; k++) { for (ii = 0; ii <= d[0] - fftblock; ii+=fftblock) { /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ #pragma omp parallel for firstprivate(i ,ii ,x ,fftblock ,j ,k ) for (j = 0; j < d[1]; j++) { #pragma omp parallel for firstprivate(i ,ii ,x ,fftblock ,j ,k ) for (i = 0; i < fftblock; i++) { y0[j][i].real = x[k][j][i+ii].real; y0[j][i].imag = x[k][j][i+ii].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */ cfftz (is, logd[1], d[1], y0, y1); /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ #pragma omp parallel for firstprivate(i ,ii ,x ,fftblock ,j ,k ) for (j = 0; j < d[1]; j++) { for (i = 0; i < fftblock; i++) { xout[k][j][i+ii].real = y0[j][i].real; xout[k][j][i+ii].imag = y0[j][i].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ } } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void cffts3(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int logd[3]; int i, j, k, ii; #pragma omp parallel for firstprivate(d ,i ) for (i = 0;i < 3; i++) { logd[i] = ilog2(d[i]); } { dcomplex y0[NX][FFTBLOCKPAD]; dcomplex y1[NX][FFTBLOCKPAD]; #pragma omp parallel for private(i ,j ,k ,ii ) for (j = 0; j < d[1]; j++) { for (ii = 0; ii <= d[0] - fftblock; ii+=fftblock) { /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ #pragma omp parallel for firstprivate(i ,ii ,x ,fftblock ,k ,j ) for (k = 0; k < d[2]; k++) { #pragma omp parallel 
for firstprivate(i ,ii ,x ,fftblock ,k ,j ) for (i = 0; i < fftblock; i++) { y0[k][i].real = x[k][j][i+ii].real; y0[k][i].imag = x[k][j][i+ii].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */ cfftz (is, logd[2], d[2], y0, y1); /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ #pragma omp parallel for firstprivate(i ,ii ,x ,fftblock ,k ,j ) for (k = 0; k < d[2]; k++) { for (i = 0; i < fftblock; i++) { xout[k][j][i+ii].real = y0[k][i].real; xout[k][j][i+ii].imag = y0[k][i].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ } } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void fft_init (int n) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c compute the roots-of-unity array that will be used for subsequent FFTs. c-------------------------------------------------------------------*/ int m,nu,ku,i,j,ln; double t, ti; /*-------------------------------------------------------------------- c Initialize the U array with sines and cosines in a manner that permits c stride one access at each FFT iteration. 
c-------------------------------------------------------------------*/
    /* u[0].real records m = log2(n).  The roots of unity for FFT stage j
       occupy u[ku .. ku+ln-1] with ln = 2^(j-1), which gives stride-one
       access within each stage.  NOTE(review): nu is set but never used. */
    nu = n;
    m = ilog2(n);
    u[0].real = (double)m;
    u[0].imag = 0.0;
    ku = 1;
    ln = 1;

    for (j = 1; j <= m; j++) {
        t = PI / ln;

        for (i = 0; i <= ln - 1; i++) {
            ti = i * t;
            u[i+ku].real = cos(ti);
            u[i+ku].imag = sin(ti);
        }

        ku = ku + ln;
        ln = 2 * ln;
    }
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

/* Batched 1-D FFT driver: transforms the NY columns of x (length n = 2^m)
   in place, using y as scratch.  is selects forward (+1) / inverse (-1). */
static void cfftz (int is, int m, int n, dcomplex x[NX][FFTBLOCKPAD],
		   dcomplex y[NX][FFTBLOCKPAD]) {

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c   Computes NY N-point complex-to-complex FFTs of X using an algorithm due
c   to Swarztrauber.  X is both the input and the output array, while Y is a
c   scratch array.  It is assumed that N = 2^M.  Before calling CFFTZ to
c   perform FFTs, the array U must be initialized by calling CFFTZ with IS
c   set to 0 and M set to MX, where MX is the maximum value of M for any
c   subsequent call.
c-------------------------------------------------------------------*/

    int i,j,l,mx;

/*--------------------------------------------------------------------
c   Check if input parameters are invalid.
c-------------------------------------------------------------------*/
    /* u[0].real holds log2 of the largest transform length prepared by
       fft_init; reject directions other than +/-1 and out-of-range m. */
    mx = (int)(u[0].real);
    if ((is != 1 && is != -1) || m < 1 || m > mx) {
        printf("CFFTZ: Either U has not been initialized, or else\n"
               "one of the input parameters is invalid%5d%5d%5d\n",
               is, m, mx);
        exit(1);
    }

/*--------------------------------------------------------------------
c   Perform one variant of the Stockham FFT.
c-------------------------------------------------------------------*/ for (l = 1; l <= m; l+=2) { fftz2 (is, l, m, n, fftblock, fftblockpad, u, x, y); if (l == m) break; fftz2 (is, l + 1, m, n, fftblock, fftblockpad, u, y, x); } /*-------------------------------------------------------------------- c Copy Y to X. c-------------------------------------------------------------------*/ if (m % 2 == 1) { for (j = 0; j < n; j++) { #pragma omp parallel for firstprivate(fftblock ,y ,x ,i ,j ) for (i = 0; i < fftblock; i++) { x[j][i].real = y[j][i].real; x[j][i].imag = y[j][i].imag; } } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void fftz2 (int is, int l, int m, int n, int ny, int ny1, dcomplex u[NX], dcomplex x[NX][FFTBLOCKPAD], dcomplex y[NX][FFTBLOCKPAD]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Performs the L-th iteration of the second variant of the Stockham FFT. c-------------------------------------------------------------------*/ int k,n1,li,lj,lk,ku,i,j,i11,i12,i21,i22; dcomplex u1,x11,x21; /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ n1 = n / 2; if (l-1 == 0) { lk = 1; } else { lk = 2 << ((l - 1)-1); } if (m-l == 0) { li = 1; } else { li = 2 << ((m - l)-1); } lj = 2 * lk; ku = li; for (i = 0; i < li; i++) { i11 = i * lk; i12 = i11 + n1; i21 = i * lj; i22 = i21 + lk; if (is >= 1) { u1.real = u[ku+i].real; u1.imag = u[ku+i].imag; } else { u1.real = u[ku+i].real; u1.imag = -u[ku+i].imag; } /*-------------------------------------------------------------------- c This loop is vectorizable. 
c-------------------------------------------------------------------*/ for (k = 0; k < lk; k++) { for (j = 0; j < ny; j++) { double x11real, x11imag; double x21real, x21imag; x11real = x[i11+k][j].real; x11imag = x[i11+k][j].imag; x21real = x[i12+k][j].real; x21imag = x[i12+k][j].imag; y[i21+k][j].real = x11real + x21real; y[i21+k][j].imag = x11imag + x21imag; y[i22+k][j].real = u1.real * (x11real - x21real) - u1.imag * (x11imag - x21imag); y[i22+k][j].imag = u1.real * (x11imag - x21imag) + u1.imag * (x11real - x21real); } } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static int ilog2(int n) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int nn, lg; if (n == 1) { return 0; } lg = 1; nn = 2; while (nn < n) { nn = nn << 1; lg++; } return lg; } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void checksum(int i, dcomplex u1[NZ][NY][NX], int d[3]) { { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int j, q,r,s, ierr; dcomplex chk,allchk; chk.real = 0.0; chk.imag = 0.0; #pragma omp parallel for for (j = 1; j <= 1024; j++) { q = j%NX+1; if (q >= xstart[0] && q <= xend[0]) { r = (3*j)%NY+1; if (r >= ystart[0] && r <= yend[0]) { s = (5*j)%NZ+1; if (s >= zstart[0] && s <= zend[0]) { cadd(chk,chk,u1[s-zstart[0]][r-ystart[0]][q-xstart[0]]); } } } } { sums[i].real += chk.real; sums[i].imag += chk.imag; } { /* complex % real */ sums[i].real = sums[i].real/(double)(NTOTAL); sums[i].imag = sums[i].imag/(double)(NTOTAL); printf("T = %5d Checksum = %22.12e %22.12e\n", i, sums[i].real, sums[i].imag); } } } /*-------------------------------------------------------------------- 
c-------------------------------------------------------------------*/ static void verify (int d1, int d2, int d3, int nt, boolean *verified, char *class) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int ierr, size, i; double err, epsilon; /*-------------------------------------------------------------------- c Sample size reference checksums c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Class S size reference checksums c-------------------------------------------------------------------*/ double vdata_real_s[6+1] = { 0.0, 5.546087004964e+02, 5.546385409189e+02, 5.546148406171e+02, 5.545423607415e+02, 5.544255039624e+02, 5.542683411902e+02 }; double vdata_imag_s[6+1] = { 0.0, 4.845363331978e+02, 4.865304269511e+02, 4.883910722336e+02, 4.901273169046e+02, 4.917475857993e+02, 4.932597244941e+02 }; /*-------------------------------------------------------------------- c Class W size reference checksums c-------------------------------------------------------------------*/ double vdata_real_w[6+1] = { 0.0, 5.673612178944e+02, 5.631436885271e+02, 5.594024089970e+02, 5.560698047020e+02, 5.530898991250e+02, 5.504159734538e+02 }; double vdata_imag_w[6+1] = { 0.0, 5.293246849175e+02, 5.282149986629e+02, 5.270996558037e+02, 5.260027904925e+02, 5.249400845633e+02, 5.239212247086e+02 }; /*-------------------------------------------------------------------- c Class A size reference checksums c-------------------------------------------------------------------*/ double vdata_real_a[6+1] = { 0.0, 5.046735008193e+02, 5.059412319734e+02, 5.069376896287e+02, 5.077892868474e+02, 5.085233095391e+02, 5.091487099959e+02 }; double vdata_imag_a[6+1] = { 0.0, 5.114047905510e+02, 5.098809666433e+02, 5.098144042213e+02, 5.101336130759e+02, 5.104914655194e+02, 5.107917842803e+02 }; 
/*-------------------------------------------------------------------- c Class B size reference checksums c-------------------------------------------------------------------*/ double vdata_real_b[20+1] = { 0.0, 5.177643571579e+02, 5.154521291263e+02, 5.146409228649e+02, 5.142378756213e+02, 5.139626667737e+02, 5.137423460082e+02, 5.135547056878e+02, 5.133910925466e+02, 5.132470705390e+02, 5.131197729984e+02, 5.130070319283e+02, 5.129070537032e+02, 5.128182883502e+02, 5.127393733383e+02, 5.126691062020e+02, 5.126064276004e+02, 5.125504076570e+02, 5.125002331720e+02, 5.124551951846e+02, 5.124146770029e+02 }; double vdata_imag_b[20+1] = { 0.0, 5.077803458597e+02, 5.088249431599e+02, 5.096208912659e+02, 5.101023387619e+02, 5.103976610617e+02, 5.105948019802e+02, 5.107404165783e+02, 5.108576573661e+02, 5.109577278523e+02, 5.110460304483e+02, 5.111252433800e+02, 5.111968077718e+02, 5.112616233064e+02, 5.113203605551e+02, 5.113735928093e+02, 5.114218460548e+02, 5.114656139760e+02, 5.115053595966e+02, 5.115415130407e+02, 5.115744692211e+02 }; /*-------------------------------------------------------------------- c Class C size reference checksums c-------------------------------------------------------------------*/ double vdata_real_c[20+1] = { 0.0, 5.195078707457e+02, 5.155422171134e+02, 5.144678022222e+02, 5.140150594328e+02, 5.137550426810e+02, 5.135811056728e+02, 5.134569343165e+02, 5.133651975661e+02, 5.132955192805e+02, 5.132410471738e+02, 5.131971141679e+02, 5.131605205716e+02, 5.131290734194e+02, 5.131012720314e+02, 5.130760908195e+02, 5.130528295923e+02, 5.130310107773e+02, 5.130103090133e+02, 5.129905029333e+02, 5.129714421109e+02 }; double vdata_imag_c[20+1] = { 0.0, 5.149019699238e+02, 5.127578201997e+02, 5.122251847514e+02, 5.121090289018e+02, 5.121143685824e+02, 5.121496764568e+02, 5.121870921893e+02, 5.122193250322e+02, 5.122454735794e+02, 5.122663649603e+02, 5.122830879827e+02, 5.122965869718e+02, 5.123075927445e+02, 5.123166486553e+02, 5.123241541685e+02, 
5.123304037599e+02, 5.123356167976e+02, 5.123399592211e+02, 5.123435588985e+02, 5.123465164008e+02 }; epsilon = 1.0e-12; *verified = TRUE; *class = 'U'; if (d1 == 64 && d2 == 64 && d3 == 64 && nt == 6) { *class = 'S'; for (i = 1; i <= nt; i++) { err = (get_real(sums[i]) - vdata_real_s[i]) / vdata_real_s[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } err = (get_imag(sums[i]) - vdata_imag_s[i]) / vdata_imag_s[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } } } else if (d1 == 128 && d2 == 128 && d3 == 32 && nt == 6) { *class = 'W'; for (i = 1; i <= nt; i++) { err = (get_real(sums[i]) - vdata_real_w[i]) / vdata_real_w[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } err = (get_imag(sums[i]) - vdata_imag_w[i]) / vdata_imag_w[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } } } else if (d1 == 256 && d2 == 256 && d3 == 128 && nt == 6) { *class = 'A'; for (i = 1; i <= nt; i++) { err = (get_real(sums[i]) - vdata_real_a[i]) / vdata_real_a[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } err = (get_imag(sums[i]) - vdata_imag_a[i]) / vdata_imag_a[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } } } else if (d1 == 512 && d2 == 256 && d3 == 256 && nt == 20) { *class = 'B'; for (i = 1; i <= nt; i++) { err = (get_real(sums[i]) - vdata_real_b[i]) / vdata_real_b[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } err = (get_imag(sums[i]) - vdata_imag_b[i]) / vdata_imag_b[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } } } else if (d1 == 512 && d2 == 512 && d3 == 512 && nt == 20) { *class = 'C'; for (i = 1; i <= nt; i++) { err = (get_real(sums[i]) - vdata_real_c[i]) / vdata_real_c[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } err = (get_imag(sums[i]) - vdata_imag_c[i]) / vdata_imag_c[i]; if (fabs(err) > epsilon) { *verified = FALSE; break; } } } if (*class != 'U') { printf("Result verification successful\n"); } else { printf("Result verification failed\n"); } printf("class = %1c\n", *class); 
}
GB_binop__pow_fp64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): this file is generator output; the macros below are consumed
// by the #include'd *_template.c files, which supply the loop bodies.  Any
// change here must be made in Generator/* and regenerated.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__pow_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_08__pow_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_02__pow_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_04__pow_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__pow_fp64)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__pow_fp64)
// C+=b function (dense accum):     GB (_Cdense_accumb__pow_fp64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__pow_fp64)
// C=scalar+B                       GB (_bind1st__pow_fp64)
// C=scalar+B'                      GB (_bind1st_tran__pow_fp64)
// C=A+scalar                       GB (_bind2nd__pow_fp64)
// C=A'+scalar                      GB (_bind2nd_tran__pow_fp64)

// C type:   double
// A type:   double
// A pattern? 0
// B type:   double
// B pattern? 0

// BinaryOp: cij = GB_pow (aij, bij)

#define GB_ATYPE \
    double

#define GB_BTYPE \
    double

#define GB_CTYPE \
    double

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    double aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
// NOTE(review): the trailing backslash after the 0 continues the macro onto
// the following blank line; harmless (the macro still expands to 0), but it
// looks like a generator artifact — presumably worth cleaning in Generator/*.
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    double bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
// NOTE(review): same stray line-continuation artifact as GB_A_IS_PATTERN.
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    double t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_pow (x, y) ;

// true if the binop must be flipped
// NOTE(review): pow is not commutative and has no flipped variant, so the
// flipxy case in _AemultB_02 below is handled by swapping the operands.
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_POW || GxB_NO_FP64 || GxB_NO_POW_FP64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// NOTE(review): pow is not in that list, so this kernel is compiled out.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__pow_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__pow_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__pow_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): this second return is unreachable (the block above always
    // returns); generator artifact, harmless.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__pow_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read when is_eWiseUnion is true; otherwise they
    // remain uninitialized and the template must not touch them.
    double alpha_scalar ;
    double beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((double *) alpha_scalar_in)) ;
        beta_scalar  = (*((double *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__pow_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__pow_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__pow_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__pow_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__pow_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double   x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        double bij = GBX (Bx, p, false) ;
        Cx [p] = GB_pow (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__pow_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double   y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        double aij = GBX (Ax, p, false) ;
        Cx [p] = GB_pow (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GB_pow (x, aij) ;                 \
}

GrB_Info GB (_bind1st_tran__pow_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for subsequent code (same type here, since A and
    // the op's x input are both double)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GB_pow (aij, y) ;                 \
}

GrB_Info GB (_bind2nd_tran__pow_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
Example_linear_modifier.3.c
/*
 * @@name:	linear_modifier.3c
 * @@type:	C
 * @@compilable:	yes
 * @@linkable:	yes
 * @@expect:	success
 * @@version:	omp_4.5
 */
#include <stdio.h>

#define N 128

/*
 * SIMD-enabled element sum: returns x[i] + y[i].
 *
 * The declare simd clauses form a contract with the caller's SIMD loop:
 *   simdlen(4)        - generate a 4-lane SIMD variant
 *   uniform(x, y)     - the base pointers are identical across lanes
 *   linear(val(i):1)  - the *value* of i increases by exactly 1 per lane
 * (the val() modifier states it is the value, not a reference, that is
 * linear — this is what the example demonstrates).
 */
#pragma omp declare simd simdlen(4) uniform(x, y) linear(val(i):1)
double func(double x[], double y[], int i)
{
   return (x[i] + y[i]);
}

int main(void)
{
   double x[N], y[N], z1[N], z2;
   int i, k;

   /* Initialize inputs: x[i] = i, y[i] = 2*i. */
   for (i = 0; i < N; i++) {
      x[i] = (double)i;
      y[i] = (double)i*2;
   }

   k = 0;
   /* k is declared linear in the SIMD loop and incremented by exactly 1
      per iteration, satisfying func's linear(val(i):1) contract. */
   #pragma omp simd linear(k)
   for (i = 0; i < N; i++) {
      z1[i] = func(x, y, k);
      k++;
   }

   /* Verify: z1[i] must equal x[i] + y[i] = i + 2*i. */
   for (i = 0; i < N; i++) {
      z2 = (double)(i + i*2);
      if (z1[i] != z2) {
         printf("failed\n");
         return 1;
      }
   }

   printf("passed\n");
   return 0;
}
GB_binop__min_int8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): generator output; the macros below are consumed by the
// #include'd *_template.c files.  Change Generator/* and regenerate instead
// of editing here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__min_int8)
// A.*B function (eWiseMult):       GB (_AemultB_01__min_int8)
// A.*B function (eWiseMult):       GB (_AemultB_02__min_int8)
// A.*B function (eWiseMult):       GB (_AemultB_03__min_int8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__min_int8)
// A*D function (colscale):         GB (_AxD__min_int8)
// D*A function (rowscale):         GB (_DxB__min_int8)
// C+=B function (dense accum):     GB (_Cdense_accumB__min_int8)
// C+=b function (dense accum):     GB (_Cdense_accumb__min_int8)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__min_int8)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__min_int8)
// C=scalar+B                       GB (_bind1st__min_int8)
// C=scalar+B'                      GB (_bind1st_tran__min_int8)
// C=A+scalar                       GB (_bind2nd__min_int8)
// C=A'+scalar                      GB (_bind2nd_tran__min_int8)

// C type:   int8_t
// A type:   int8_t
// B,b type: int8_t

// BinaryOp: cij = GB_IMIN (aij, bij)

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    int8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int8_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int8_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_IMIN (x, y) ;

// true if the binop must be flipped
// NOTE(review): min is commutative, so no flipped handling is needed; the
// #if GB_BINOP_FLIP branch in _AemultB_02 below compiles to the #else case.
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MIN || GxB_NO_INT8 || GxB_NO_MIN_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__min_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__min_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__min_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__min_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable (the block above always returns); generator
    // artifact, harmless.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__min_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__min_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__min_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__min_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__min_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__min_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__min_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__min_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t   x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_IMIN (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__min_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t   y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        int8_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_IMIN (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int8_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GB_IMIN (x, aij) ;                \
}

GrB_Info GB (_bind1st_tran__min_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for subsequent code (same type here, since A and
    // the op's x input are both int8_t)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int8_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = GB_IMIN (aij, y) ;                \
}

GrB_Info GB (_bind2nd_tran__min_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
integrate.c
/*
 * integrate.c: Example of numerical integration in OpenMP.
 *
 * (C) 2015 Mikhail Kurnosov <mkurnosov@gmail.com>
 */

#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>

const double PI = 3.14159265358979323846;
const double a = -4.0;
const double b = 4.0;
const int nsteps = 40000000;

/* wtime: Wall-clock time in seconds, microsecond resolution. */
double wtime()
{
    struct timeval t;
    gettimeofday(&t, NULL);
    return (double)t.tv_sec + (double)t.tv_usec * 1E-6;
}

/* func: Integrand exp(-x^2); its integral over (-inf, inf) is sqrt(PI). */
double func(double x)
{
    return exp(-x * x);
}

/* integrate: Integrates by rectangle method (midpoint rule) */
double integrate(double (*func)(double), double a, double b, int n)
{
    double h = (b - a) / n;
    double sum = 0.0;
    for (int i = 0; i < n; i++)
        sum += func(a + h * (i + 0.5));
    sum *= h;
    return sum;
}

/* run_serial: Times the serial integration and prints result + error. */
double run_serial()
{
    double t = wtime();
    double res = integrate(func, a, b, nsteps);
    t = wtime() - t;
    printf("Result (serial): %.12f; error %.12f\n", res, fabs(res - sqrt(PI)));
    return t;
}

/*
 * integrate_omp: Parallel midpoint-rule integration.
 *
 * Each thread handles a contiguous block of indices [lb, ub] (the last
 * thread absorbs the remainder when n is not divisible by nthreads).
 *
 * Fix: the original accumulated directly into the shared `sum` from every
 * iteration without synchronization — a data race giving nondeterministic,
 * wrong results.  Each thread now accumulates into a thread-private
 * `local_sum` and folds it into `sum` once, under an atomic update.
 */
double integrate_omp(double (*func)(double), double a, double b, int n)
{
    double h = (b - a) / n;
    double sum = 0.0;
    #pragma omp parallel
    {
        int nthreads = omp_get_num_threads();
        int threadid = omp_get_thread_num();
        int items_per_thread = n / nthreads;
        int lb = threadid * items_per_thread;
        int ub = (threadid == nthreads - 1) ? (n - 1) : (lb + items_per_thread - 1);
        double local_sum = 0.0;
        for (int i = lb; i <= ub; i++)
            local_sum += func(a + h * (i + 0.5));
        /* One synchronized update per thread, not per iteration. */
        #pragma omp atomic
        sum += local_sum;
    }
    sum *= h;
    return sum;
}

/* run_parallel: Times the parallel integration and prints result + error. */
double run_parallel()
{
    double t = wtime();
    double res = integrate_omp(func, a, b, nsteps);
    t = wtime() - t;
    printf("Result (parallel): %.12f; error %.12f\n", res, fabs(res - sqrt(PI)));
    return t;
}

int main(int argc, char **argv)
{
    printf("Integration f(x) on [%.12f, %.12f], nsteps = %d\n", a, b, nsteps);
    double tserial = run_serial();
    double tparallel = run_parallel();

    printf("Execution time (serial): %.6f\n", tserial);
    printf("Execution time (parallel): %.6f\n", tparallel);
    printf("Speedup: %.2f\n", tserial / tparallel);
    return 0;
}
Parser.h
//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Parser interface. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_PARSE_PARSER_H #define LLVM_CLANG_PARSE_PARSER_H #include "clang/AST/Availability.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/OperatorPrecedence.h" #include "clang/Basic/Specifiers.h" #include "clang/Lex/CodeCompletionHandler.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/Sema.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Frontend/OpenMP/OMPContext.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/SaveAndRestore.h" #include <memory> #include <stack> namespace clang { class PragmaHandler; class Scope; class BalancedDelimiterTracker; class CorrectionCandidateCallback; class DeclGroupRef; class DiagnosticBuilder; struct LoopHint; class Parser; class ParsingDeclRAIIObject; class ParsingDeclSpec; class ParsingDeclarator; class ParsingFieldDeclarator; class ColonProtectionRAIIObject; class InMessageExpressionRAIIObject; class PoisonSEHIdentifiersRAIIObject; class OMPClause; class ObjCTypeParamList; struct OMPTraitProperty; struct OMPTraitSelector; struct OMPTraitSet; class OMPTraitInfo; /// Parser - This implements a parser for the C family of languages. After /// parsing units of the grammar, productions are invoked to handle whatever has /// been read. 
/// class Parser : public CodeCompletionHandler { friend class ColonProtectionRAIIObject; friend class ParsingOpenMPDirectiveRAII; friend class InMessageExpressionRAIIObject; friend class PoisonSEHIdentifiersRAIIObject; friend class ObjCDeclContextSwitch; friend class ParenBraceBracketBalancer; friend class BalancedDelimiterTracker; Preprocessor &PP; /// Tok - The current token we are peeking ahead. All parsing methods assume /// that this is valid. Token Tok; // PrevTokLocation - The location of the token we previously // consumed. This token is used for diagnostics where we expected to // see a token following another token (e.g., the ';' at the end of // a statement). SourceLocation PrevTokLocation; /// Tracks an expected type for the current token when parsing an expression. /// Used by code completion for ranking. PreferredTypeBuilder PreferredType; unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0; unsigned short MisplacedModuleBeginCount = 0; /// Actions - These are the callbacks we invoke as we parse various constructs /// in the file. Sema &Actions; DiagnosticsEngine &Diags; /// ScopeCache - Cache scopes to reduce malloc traffic. enum { ScopeCacheSize = 16 }; unsigned NumCachedScopes; Scope *ScopeCache[ScopeCacheSize]; /// Identifiers used for SEH handling in Borland. These are only /// allowed in particular circumstances // __except block IdentifierInfo *Ident__exception_code, *Ident___exception_code, *Ident_GetExceptionCode; // __except filter expression IdentifierInfo *Ident__exception_info, *Ident___exception_info, *Ident_GetExceptionInfo; // __finally IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination, *Ident_AbnormalTermination; /// Contextual keywords for Microsoft extensions. IdentifierInfo *Ident__except; mutable IdentifierInfo *Ident_sealed; mutable IdentifierInfo *Ident_abstract; /// Ident_super - IdentifierInfo for "super", to support fast /// comparison. 
IdentifierInfo *Ident_super; /// Ident_vector, Ident_bool, Ident_Bool - cached IdentifierInfos for "vector" /// and "bool" fast comparison. Only present if AltiVec or ZVector are /// enabled. IdentifierInfo *Ident_vector; IdentifierInfo *Ident_bool; IdentifierInfo *Ident_Bool; /// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison. /// Only present if AltiVec enabled. IdentifierInfo *Ident_pixel; /// Objective-C contextual keywords. IdentifierInfo *Ident_instancetype; /// Identifier for "introduced". IdentifierInfo *Ident_introduced; /// Identifier for "deprecated". IdentifierInfo *Ident_deprecated; /// Identifier for "obsoleted". IdentifierInfo *Ident_obsoleted; /// Identifier for "unavailable". IdentifierInfo *Ident_unavailable; /// Identifier for "message". IdentifierInfo *Ident_message; /// Identifier for "strict". IdentifierInfo *Ident_strict; /// Identifier for "replacement". IdentifierInfo *Ident_replacement; /// Identifiers used by the 'external_source_symbol' attribute. IdentifierInfo *Ident_language, *Ident_defined_in, *Ident_generated_declaration; /// C++11 contextual keywords. mutable IdentifierInfo *Ident_final; mutable IdentifierInfo *Ident_GNU_final; mutable IdentifierInfo *Ident_override; // C++2a contextual keywords. mutable IdentifierInfo *Ident_import; mutable IdentifierInfo *Ident_module; // C++ type trait keywords that can be reverted to identifiers and still be // used as type traits. 
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits; std::unique_ptr<PragmaHandler> AlignHandler; std::unique_ptr<PragmaHandler> GCCVisibilityHandler; std::unique_ptr<PragmaHandler> OptionsHandler; std::unique_ptr<PragmaHandler> PackHandler; std::unique_ptr<PragmaHandler> MSStructHandler; std::unique_ptr<PragmaHandler> UnusedHandler; std::unique_ptr<PragmaHandler> WeakHandler; std::unique_ptr<PragmaHandler> RedefineExtnameHandler; std::unique_ptr<PragmaHandler> FPContractHandler; std::unique_ptr<PragmaHandler> OpenCLExtensionHandler; std::unique_ptr<PragmaHandler> OpenMPHandler; std::unique_ptr<PragmaHandler> PCSectionHandler; std::unique_ptr<PragmaHandler> MSCommentHandler; std::unique_ptr<PragmaHandler> MSDetectMismatchHandler; std::unique_ptr<PragmaHandler> FloatControlHandler; std::unique_ptr<PragmaHandler> MSPointersToMembers; std::unique_ptr<PragmaHandler> MSVtorDisp; std::unique_ptr<PragmaHandler> MSInitSeg; std::unique_ptr<PragmaHandler> MSDataSeg; std::unique_ptr<PragmaHandler> MSBSSSeg; std::unique_ptr<PragmaHandler> MSConstSeg; std::unique_ptr<PragmaHandler> MSCodeSeg; std::unique_ptr<PragmaHandler> MSSection; std::unique_ptr<PragmaHandler> MSRuntimeChecks; std::unique_ptr<PragmaHandler> MSIntrinsic; std::unique_ptr<PragmaHandler> MSOptimize; std::unique_ptr<PragmaHandler> MSFenvAccess; std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler; std::unique_ptr<PragmaHandler> OptimizeHandler; std::unique_ptr<PragmaHandler> LoopHintHandler; std::unique_ptr<PragmaHandler> UnrollHintHandler; std::unique_ptr<PragmaHandler> NoUnrollHintHandler; std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> FPHandler; std::unique_ptr<PragmaHandler> STDCFenvAccessHandler; std::unique_ptr<PragmaHandler> STDCFenvRoundHandler; std::unique_ptr<PragmaHandler> STDCCXLIMITHandler; std::unique_ptr<PragmaHandler> STDCUnknownHandler; std::unique_ptr<PragmaHandler> 
AttributePragmaHandler; std::unique_ptr<PragmaHandler> MaxTokensHerePragmaHandler; std::unique_ptr<PragmaHandler> MaxTokensTotalPragmaHandler; std::unique_ptr<CommentHandler> CommentSemaHandler; /// Whether the '>' token acts as an operator or not. This will be /// true except when we are parsing an expression within a C++ /// template argument list, where the '>' closes the template /// argument list. bool GreaterThanIsOperator; /// ColonIsSacred - When this is false, we aggressively try to recover from /// code like "foo : bar" as if it were a typo for "foo :: bar". This is not /// safe in case statements and a few other things. This is managed by the /// ColonProtectionRAIIObject RAII object. bool ColonIsSacred; /// Parsing OpenMP directive mode. bool OpenMPDirectiveParsing = false; /// When true, we are directly inside an Objective-C message /// send expression. /// /// This is managed by the \c InMessageExpressionRAIIObject class, and /// should not be set directly. bool InMessageExpression; /// Gets set to true after calling ProduceSignatureHelp, it is for a /// workaround to make sure ProduceSignatureHelp is only called at the deepest /// function call. bool CalledSignatureHelp = false; /// The "depth" of the template parameters currently being parsed. unsigned TemplateParameterDepth; /// Current kind of OpenMP clause OpenMPClauseKind OMPClauseKind = llvm::omp::OMPC_unknown; /// RAII class that manages the template parameter depth. 
class TemplateParameterDepthRAII {
  unsigned &Depth;
  unsigned AddedLevels;

public:
  explicit TemplateParameterDepthRAII(unsigned &Depth)
      : Depth(Depth), AddedLevels(0) {}

  // On destruction, remove exactly the levels that were added through this
  // object, restoring the depth seen at construction time.
  ~TemplateParameterDepthRAII() { Depth -= AddedLevels; }

  void operator++() {
    ++Depth;
    ++AddedLevels;
  }
  void addDepth(unsigned D) {
    Depth += D;
    AddedLevels += D;
  }
  // Replace this object's contribution with exactly D levels.
  void setAddedDepth(unsigned D) {
    Depth = Depth - AddedLevels + D;
    AddedLevels = D;
  }

  unsigned getDepth() const { return Depth; }
  unsigned getOriginalDepth() const { return Depth - AddedLevels; }
};

/// Factory object for creating ParsedAttr objects.
AttributeFactory AttrFactory;

/// Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;

// Destroy the cached TemplateIdAnnotations once no annotation tokens that
// could still reference them are pending (i.e. at EOF, or when the
// preprocessor reports none outstanding).
void MaybeDestroyTemplateIds() {
  if (!TemplateIds.empty() &&
      (Tok.is(tok::eof) || !PP.mightHavePendingAnnotationTokens()))
    DestroyTemplateIds();
}
void DestroyTemplateIds();

/// RAII object to destroy TemplateIdAnnotations where possible, from a
/// likely-good position during parsing.
struct DestroyTemplateIdAnnotationsRAIIObj {
  Parser &Self;

  DestroyTemplateIdAnnotationsRAIIObj(Parser &Self) : Self(Self) {}
  ~DestroyTemplateIdAnnotationsRAIIObj() { Self.MaybeDestroyTemplateIds(); }
};

/// Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;

/// Tracker for '<' tokens that might have been intended to be treated as an
/// angle bracket instead of a less-than comparison.
///
/// This happens when the user intends to form a template-id, but typoes the
/// template-name or forgets a 'template' keyword for a dependent template
/// name.
///
/// We track these locations from the point where we see a '<' with a
/// name-like expression on its left until we see a '>' or '>>' that might
/// match it.
struct AngleBracketTracker {
  /// Flags used to rank candidate template names when there is more than one
  /// '<' in a scope.
  enum Priority : unsigned short {
    /// A non-dependent name that is a potential typo for a template name.
    PotentialTypo = 0x0,
    /// A dependent name that might instantiate to a template-name.
    DependentName = 0x2,

    /// A space appears before the '<' token.
    SpaceBeforeLess = 0x0,
    /// No space before the '<' token.
    NoSpaceBeforeLess = 0x1,

    LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName)
  };

  struct Loc {
    Expr *TemplateName;
    SourceLocation LessLoc;
    AngleBracketTracker::Priority Priority;
    // Bracket-nesting counts captured when the '<' was seen; a candidate is
    // "active" only while the parser is at the same nesting level.
    unsigned short ParenCount, BracketCount, BraceCount;

    bool isActive(Parser &P) const {
      return P.ParenCount == ParenCount && P.BracketCount == BracketCount &&
             P.BraceCount == BraceCount;
    }

    bool isActiveOrNested(Parser &P) const {
      return isActive(P) || P.ParenCount > ParenCount ||
             P.BracketCount > BracketCount || P.BraceCount > BraceCount;
    }
  };

  SmallVector<Loc, 8> Locs;

  /// Add an expression that might have been intended to be a template name.
  /// In the case of ambiguity, we arbitrarily select the innermost such
  /// expression, for example in 'foo < bar < baz', 'bar' is the current
  /// candidate. No attempt is made to track that 'foo' is also a candidate
  /// for the case where we see a second suspicious '>' token.
  void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
           Priority Prio) {
    if (!Locs.empty() && Locs.back().isActive(P)) {
      if (Locs.back().Priority <= Prio) {
        Locs.back().TemplateName = TemplateName;
        Locs.back().LessLoc = LessLoc;
        Locs.back().Priority = Prio;
      }
    } else {
      Locs.push_back({TemplateName, LessLoc, Prio, P.ParenCount,
                      P.BracketCount, P.BraceCount});
    }
  }

  /// Mark the current potential missing template location as having been
  /// handled (this happens if we pass a "corresponding" '>' or '>>' token
  /// or leave a bracket scope).
  void clear(Parser &P) {
    while (!Locs.empty() && Locs.back().isActiveOrNested(P))
      Locs.pop_back();
  }

  /// Get the current enclosing expression that might have been intended to
  /// be a template name.
  Loc *getCurrent(Parser &P) {
    if (!Locs.empty() && Locs.back().isActive(P))
      return &Locs.back();
    return nullptr;
  }
};

AngleBracketTracker AngleBrackets;

IdentifierInfo *getSEHExceptKeyword();

/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;

/// Whether to skip parsing of function bodies.
///
/// This option can be used, for example, to speed up searches for
/// declarations/definitions when indexing.
bool SkipFunctionBodies;

/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;

/// Flags describing a context in which we're parsing a statement.
enum class ParsedStmtContext {
  /// This context permits declarations in language modes where declarations
  /// are not statements.
  AllowDeclarationsInC = 0x1,
  /// This context permits standalone OpenMP directives.
  AllowStandaloneOpenMPDirectives = 0x2,
  /// This context is at the top level of a GNU statement expression.
  InStmtExpr = 0x4,

  /// The context of a regular substatement.
  SubStmt = 0,
  /// The context of a compound-statement.
  Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives,

  LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr)
};

/// Act on an expression statement that might be the last statement in a
/// GNU statement expression. Checks whether we are actually at the end of
/// a statement expression and builds a suitable expression statement.
StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx);

public:
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;

// Trivial accessors, mostly forwarding to the preprocessor and Sema.
const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
AttributeFactory &getAttrFactory() { return AttrFactory; }

const Token &getCurToken() const { return Tok; }
Scope *getCurScope() const { return Actions.getCurScope(); }
void incrementMSManglingNumber() const {
  return Actions.incrementMSManglingNumber();
}

Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }

// Type forwarding.  All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;

typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;

typedef Sema::FullExprArg FullExprArg;

// Parsing methods.

/// Initialize - Warm up the parser.
///
void Initialize();

/// Parse the first top-level declaration in a translation unit.
bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);

/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false);
bool ParseTopLevelDecl() {
  DeclGroupPtrTy Result;
  return ParseTopLevelDecl(Result);
}

/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion,
/// annotation tokens and balanced tokens must be handled using the specific
/// consume methods.
/// Returns the location of the consumed token.
SourceLocation ConsumeToken() {
  assert(!isTokenSpecial() &&
         "Should consume special tokens with Consume*Token");
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}

/// Consume the current token iff it has the expected kind; returns true on
/// success (the token must not be one of the "special" kinds).
bool TryConsumeToken(tok::TokenKind Expected) {
  if (Tok.isNot(Expected))
    return false;
  assert(!isTokenSpecial() &&
         "Should consume special tokens with Consume*Token");
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return true;
}

/// As above, but additionally reports the consumed token's location in
/// \p Loc on success.
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
  if (!TryConsumeToken(Expected))
    return false;
  Loc = PrevTokLocation;
  return true;
}

/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type.  This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
  if (isTokenParen())
    return ConsumeParen();
  if (isTokenBracket())
    return ConsumeBracket();
  if (isTokenBrace())
    return ConsumeBrace();
  if (isTokenStringLiteral())
    return ConsumeStringToken();
  if (Tok.is(tok::code_completion))
    return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
                                    : handleUnexpectedCodeCompletionToken();
  if (Tok.isAnnotation())
    return ConsumeAnnotationToken();
  return ConsumeToken();
}

SourceLocation getEndOfPreviousToken() {
  return PP.getLocForEndOfToken(PrevTokLocation);
}

/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
  return Actions.getNullabilityKeyword(nullability);
}

private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//

/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const { return Tok.isOneOf(tok::l_paren, tok::r_paren); }

/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
  return Tok.isOneOf(tok::l_square, tok::r_square);
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const { return Tok.isOneOf(tok::l_brace, tok::r_brace); }
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
  return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods.
bool isTokenSpecial() const {
  return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
         isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
}

/// Returns true if the current token is '=' or is a type of '='.
/// For typos, give a fixit to '='.
bool isTokenEqualOrEqualTypo();

/// Return the current token to the token stream and make the given
/// token the current token.
void UnconsumeToken(Token &Consumed) {
  Token Next = Tok;
  // Re-inject the consumed token, re-lex it as the current token, then
  // re-inject the old current token so it is seen next.
  PP.EnterToken(Consumed, /*IsReinject*/ true);
  PP.Lex(Tok);
  PP.EnterToken(Next, /*IsReinject*/ true);
}

SourceLocation ConsumeAnnotationToken() {
  assert(Tok.isAnnotation() && "wrong consume method");
  SourceLocation Loc = Tok.getLocation();
  // An annotation token covers a source range; the "previous token"
  // location for subsequent parsing is its end, not its start.
  PrevTokLocation = Tok.getAnnotationEndLoc();
  PP.Lex(Tok);
  return Loc;
}

/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
SourceLocation ConsumeParen() {
  assert(isTokenParen() && "wrong consume method");
  if (Tok.getKind() == tok::l_paren)
    ++ParenCount;
  else if (ParenCount) {
    AngleBrackets.clear(*this);
    --ParenCount; // Don't let unbalanced )'s drive the count negative.
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}

/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
SourceLocation ConsumeBracket() {
  assert(isTokenBracket() && "wrong consume method");
  if (Tok.getKind() == tok::l_square)
    ++BracketCount;
  else if (BracketCount) {
    AngleBrackets.clear(*this);
    --BracketCount; // Don't let unbalanced ]'s drive the count negative.
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}

/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
SourceLocation ConsumeBrace() {
  assert(isTokenBrace() && "wrong consume method");
  if (Tok.getKind() == tok::l_brace)
    ++BraceCount;
  else if (BraceCount) {
    AngleBrackets.clear(*this);
    --BraceCount; // Don't let unbalanced }'s drive the count negative.
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}

/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind.  This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
SourceLocation ConsumeStringToken() {
  assert(isTokenStringLiteral() &&
         "Should only consume string literals with this method");
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}

/// Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
SourceLocation ConsumeCodeCompletionToken() {
  assert(Tok.is(tok::code_completion));
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}

/// \brief When we are consuming a code-completion token without having
/// matched specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();

/// Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
void cutOffParsing() {
  if (PP.isCodeCompletionEnabled())
    PP.setCodeCompletionReached();
  // Cut off parsing by acting as if we reached the end-of-file.
  Tok.setKind(tok::eof);
}

/// Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
  tok::TokenKind Kind = Tok.getKind();
  return Kind == tok::eof || Kind == tok::annot_module_begin ||
         Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}

/// Checks if the \p Level is valid for use in a fold expression.
bool isFoldOperator(prec::Level Level) const;

/// Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const;

/// Initialize all pragma handlers.
void initializePragmaHandlers();

/// Destroy and reset all pragma handlers.
void resetPragmaHandlers();

/// Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();

/// Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();

/// Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();

/// Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();

void HandlePragmaMSPointersToMembers();

void HandlePragmaMSVtorDisp();

void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
                           SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
                           SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
                           SourceLocation PragmaLocation);

/// Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();

/// Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();

/// Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();

/// Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();

/// Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();

/// Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();

/// Handle the annotation token produced for
/// #pragma STDC FENV_ACCESS...
void HandlePragmaFEnvAccess();

/// Handle the annotation token produced for
/// #pragma STDC FENV_ROUND...
void HandlePragmaFEnvRound();

/// Handle the annotation token produced for
/// #pragma float_control
void HandlePragmaFloatControl();

/// \brief Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();

/// Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();

/// Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();

/// Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);

bool ParsePragmaAttributeSubjectMatchRuleSet(
    attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
    SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);

void HandlePragmaAttribute();

/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens.  LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
  if (N == 0 || Tok.is(tok::eof))
    return Tok;
  return PP.LookAhead(N - 1);
}

public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
const Token &NextToken() { return PP.LookAhead(0); }

/// getTypeAnnotation - Read a parsed type out of an annotation token.
static TypeResult getTypeAnnotation(const Token &Tok) {
  if (!Tok.getAnnotationValue())
    return TypeError();
  return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}

private:
static void setTypeAnnotation(Token &Tok, TypeResult T) {
  assert((T.isInvalid() || T.get()) &&
         "produced a valid-but-null type annotation?");
  Tok.setAnnotationValue(T.isInvalid() ? nullptr : T.get().getAsOpaquePtr());
}

static NamedDecl *getNonTypeAnnotation(const Token &Tok) {
  return static_cast<NamedDecl*>(Tok.getAnnotationValue());
}

static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) {
  Tok.setAnnotationValue(ND);
}

static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) {
  return static_cast<IdentifierInfo*>(Tok.getAnnotationValue());
}

static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) {
  Tok.setAnnotationValue(ND);
}

/// Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(const Token &Tok) {
  return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}

/// Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
  Tok.setAnnotationValue(ER.getAsOpaquePointer());
}

public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
                                               bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);

bool MightBeCXXScopeToken() {
  return Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
         (Tok.is(tok::annot_template_id) &&
          NextToken().is(tok::coloncolon)) ||
         Tok.is(tok::kw_decltype) || Tok.is(tok::kw___super);
}
bool TryAnnotateOptionalCXXScopeToken(bool EnteringContext = false) {
  return MightBeCXXScopeToken() && TryAnnotateCXXScopeToken(EnteringContext);
}

private:
enum AnnotatedNameKind {
  /// Annotation has failed and emitted an error.
  ANK_Error,
  /// The identifier is a tentatively-declared name.
  ANK_TentativeDecl,
  /// The identifier is a template name. FIXME: Add an annotation for that.
  ANK_TemplateName,
  /// The identifier can't be resolved.
  ANK_Unresolved,
  /// Annotation was successful.
  ANK_Success
};

AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr);

/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);

/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords.  This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec,
                     unsigned &DiagID, bool &isInvalid) {
  if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
    return false;

  // Fast path: bail out before the out-of-line check unless the token is one
  // of the context-sensitive identifiers ('pixel' only exists in AltiVec).
  if (Tok.getIdentifierInfo() != Ident_vector &&
      Tok.getIdentifierInfo() != Ident_bool &&
      Tok.getIdentifierInfo() != Ident_Bool &&
      (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
    return false;

  return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}

/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
  if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) ||
      Tok.getIdentifierInfo() != Ident_vector)
    return false;
  return TryAltiVecVectorTokenOutOfLine();
}

bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
                              const char *&PrevSpec, unsigned &DiagID,
                              bool &isInvalid);

/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
  assert(getLangOpts().ObjC);
  if (Tok.isAnnotation())
    return false;
  // Lazily cache the IdentifierInfo for 'instancetype'.
  if (!Ident_instancetype)
    Ident_instancetype = PP.getIdentifierInfo("instancetype");
  return Tok.getIdentifierInfo() == Ident_instancetype;
}

/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit.  This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);

/// Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);

/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
///   TentativeParsingAction TPA(*this);
///   ConsumeToken();
///   ....
///   TPA.Revert();
///
class TentativeParsingAction {
  Parser &P;
  // Parser state saved at construction so Revert() can restore it.
  PreferredTypeBuilder PrevPreferredType;
  Token PrevTok;
  size_t PrevTentativelyDeclaredIdentifierCount;
  unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
  bool isActive;

public:
  explicit TentativeParsingAction(Parser &p)
      : P(p), PrevPreferredType(P.PreferredType) {
    PrevTok = P.Tok;
    PrevTentativelyDeclaredIdentifierCount =
        P.TentativelyDeclaredIdentifiers.size();
    PrevParenCount = P.ParenCount;
    PrevBracketCount = P.BracketCount;
    PrevBraceCount = P.BraceCount;
    P.PP.EnableBacktrackAtThisPos();
    isActive = true;
  }
  void Commit() {
    assert(isActive && "Parsing action was finished!");
    P.TentativelyDeclaredIdentifiers.resize(
        PrevTentativelyDeclaredIdentifierCount);
    P.PP.CommitBacktrackedTokens();
    isActive = false;
  }
  void Revert() {
    assert(isActive && "Parsing action was finished!");
    P.PP.Backtrack();
    P.PreferredType = PrevPreferredType;
    P.Tok = PrevTok;
    P.TentativelyDeclaredIdentifiers.resize(
        PrevTentativelyDeclaredIdentifierCount);
    P.ParenCount = PrevParenCount;
    P.BracketCount = PrevBracketCount;
    P.BraceCount = PrevBraceCount;
    isActive = false;
  }
  ~TentativeParsingAction() {
    assert(!isActive && "Forgot to call Commit or Revert!");
  }
};

/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
class RevertingTentativeParsingAction
    : private Parser::TentativeParsingAction {
public:
  RevertingTentativeParsingAction(Parser &P)
      : Parser::TentativeParsingAction(P) {}
  ~RevertingTentativeParsingAction() { Revert(); }
};

class UnannotatedTentativeParsingAction;

/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
  Parser &P;
  Decl *DC;
  SaveAndRestore<bool> WithinObjCContainer;

public:
  explicit ObjCDeclContextSwitch(Parser &p)
      : P(p), DC(p.getObjCDeclContext()),
        WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
    if (DC)
      P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
  }
  ~ObjCDeclContextSwitch() {
    if (DC)
      P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
  }
};

/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input.  If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
                      unsigned Diag = diag::err_expected,
                      StringRef DiagMsg = "");

/// The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);

/// The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
  OutsideFunction = 0,
  InsideStruct = 1,
  InstanceVariableList = 2,
  AfterMemberFunctionDefinition = 3
};

/// Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified);

/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();

/// Kinds of compound pseudo-tokens formed by a sequence of two real tokens.
enum class CompoundToken {
  /// A '(' '{' beginning a statement-expression.
  StmtExprBegin,
  /// A '}' ')' ending a statement-expression.
  StmtExprEnd,
  /// A '[' '[' beginning a C++11 or C2x attribute.
  AttrBegin,
  /// A ']' ']' ending a C++11 or C2x attribute.
  AttrEnd,
  /// A '::' '*' forming a C++ pointer-to-member declaration.
  MemberPtr,
};

/// Check that a compound operator was written in a "sensible" way, and warn
/// if not.
void checkCompoundToken(SourceLocation FirstTokLoc,
                        tok::TokenKind FirstTokKind, CompoundToken Op);

public:
//===--------------------------------------------------------------------===//
// Scope manipulation

/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
class ParseScope {
  Parser *Self;
  ParseScope(const ParseScope &) = delete;
  void operator=(const ParseScope &) = delete;

public:
  // ParseScope - Construct a new object to manage a scope in the
  // parser Self where the new Scope is created with the flags
  // ScopeFlags, but only when we aren't about to enter a compound statement.
  ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
             bool BeforeCompoundStmt = false)
      : Self(Self) {
    if (EnteredScope && !BeforeCompoundStmt)
      Self->EnterScope(ScopeFlags);
    else {
      if (BeforeCompoundStmt)
        Self->incrementMSManglingNumber();
      // Null out Self so Exit()/the destructor do not pop a scope we never
      // entered.
      this->Self = nullptr;
    }
  }

  // Exit - Exit the scope associated with this object now, rather
  // than waiting until the object is destroyed.
  void Exit() {
    if (Self) {
      Self->ExitScope();
      Self = nullptr;
    }
  }

  ~ParseScope() { Exit(); }
};

/// Introduces zero or more scopes for parsing. The scopes will all be exited
/// when the object is destroyed.
class MultiParseScope {
  Parser &Self;
  unsigned NumScopes = 0;

  MultiParseScope(const MultiParseScope &) = delete;

public:
  MultiParseScope(Parser &Self) : Self(Self) {}
  void Enter(unsigned ScopeFlags) {
    Self.EnterScope(ScopeFlags);
    ++NumScopes;
  }
  void Exit() {
    while (NumScopes) {
      Self.ExitScope();
      --NumScopes;
    }
  }
  ~MultiParseScope() { Exit(); }
};

/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);

/// ExitScope - Pop a scope off the scope stack.
void ExitScope();

/// Re-enter the template scopes for a declaration that might be a template.
unsigned ReenterTemplateScopes(MultiParseScope &S, Decl *D);

private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
  Scope *CurScope;
  unsigned OldFlags;
  ParseScopeFlags(const ParseScopeFlags &) = delete;
  void operator=(const ParseScopeFlags &) = delete;

public:
  ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
  ~ParseScopeFlags();
};

//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.

public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
DiagnosticBuilder Diag(unsigned DiagID) { return Diag(Tok, DiagID); }

private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
                        SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);

public:
/// Control flags for SkipUntil functions.
enum SkipUntilFlags { StopAtSemi = 1 << 0, ///< Stop skipping at semicolon /// Stop skipping at specified token, but don't skip the token itself StopBeforeMatch = 1 << 1, StopAtCodeCompletion = 1 << 2 ///< Stop at code completion }; friend constexpr SkipUntilFlags operator|(SkipUntilFlags L, SkipUntilFlags R) { return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) | static_cast<unsigned>(R)); } /// SkipUntil - Read tokens until we get to the specified token, then consume /// it (unless StopBeforeMatch is specified). Because we cannot guarantee /// that the token will ever occur, this skips to the next token, or to some /// likely good stopping point. If Flags has StopAtSemi flag, skipping will /// stop at a ';' character. Balances (), [], and {} delimiter tokens while /// skipping. /// /// If SkipUntil finds the specified token, it returns true, otherwise it /// returns false. bool SkipUntil(tok::TokenKind T, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) { return SkipUntil(llvm::makeArrayRef(T), Flags); } bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) { tok::TokenKind TokArray[] = {T1, T2}; return SkipUntil(TokArray, Flags); } bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) { tok::TokenKind TokArray[] = {T1, T2, T3}; return SkipUntil(TokArray, Flags); } bool SkipUntil(ArrayRef<tok::TokenKind> Toks, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)); /// SkipMalformedDecl - Read tokens until we get to some likely good stopping /// point for skipping past a simple-declaration. void SkipMalformedDecl(); /// The location of the first statement inside an else that might /// have a missleading indentation. If there is no /// MisleadingIndentationChecker on an else active, this location is invalid. 
SourceLocation MisleadingIndentationElseLoc; private: //===--------------------------------------------------------------------===// // Lexing and parsing of C++ inline methods. struct ParsingClass; /// [class.mem]p1: "... the class is regarded as complete within /// - function bodies /// - default arguments /// - exception-specifications (TODO: C++0x) /// - and brace-or-equal-initializers for non-static data members /// (including such things in nested classes)." /// LateParsedDeclarations build the tree of those elements so they can /// be parsed after parsing the top-level class. class LateParsedDeclaration { public: virtual ~LateParsedDeclaration(); virtual void ParseLexedMethodDeclarations(); virtual void ParseLexedMemberInitializers(); virtual void ParseLexedMethodDefs(); virtual void ParseLexedAttributes(); virtual void ParseLexedPragmas(); }; /// Inner node of the LateParsedDeclaration tree that parses /// all its members recursively. class LateParsedClass : public LateParsedDeclaration { public: LateParsedClass(Parser *P, ParsingClass *C); ~LateParsedClass() override; void ParseLexedMethodDeclarations() override; void ParseLexedMemberInitializers() override; void ParseLexedMethodDefs() override; void ParseLexedAttributes() override; void ParseLexedPragmas() override; private: Parser *Self; ParsingClass *Class; }; /// Contains the lexed tokens of an attribute with arguments that /// may reference member variables and so need to be parsed at the /// end of the class declaration after parsing all other member /// member declarations. /// FIXME: Perhaps we should change the name of LateParsedDeclaration to /// LateParsedTokens. 
struct LateParsedAttribute : public LateParsedDeclaration {
    Parser *Self;
    CachedTokens Toks;
    IdentifierInfo &AttrName;
    IdentifierInfo *MacroII = nullptr;
    SourceLocation AttrNameLoc;
    SmallVector<Decl*, 2> Decls;

    explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
                                 SourceLocation Loc)
        : Self(P), AttrName(Name), AttrNameLoc(Loc) {}

    void ParseLexedAttributes() override;

    void addDecl(Decl *D) { Decls.push_back(D); }
  };

  /// Contains the lexed tokens of a pragma with arguments that
  /// may reference member variables and so need to be parsed at the
  /// end of the class declaration after parsing all other member
  /// declarations.
  class LateParsedPragma : public LateParsedDeclaration {
    Parser *Self = nullptr;
    AccessSpecifier AS = AS_none;
    CachedTokens Toks;

  public:
    explicit LateParsedPragma(Parser *P, AccessSpecifier AS)
        : Self(P), AS(AS) {}

    void takeToks(CachedTokens &Cached) { Toks.swap(Cached); }
    const CachedTokens &toks() const { return Toks; }
    AccessSpecifier getAccessSpecifier() const { return AS; }

    void ParseLexedPragmas() override;
  };

  // A list of late-parsed attributes.  Used by ParseGNUAttributes.
  class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
  public:
    LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }

    bool parseSoon() { return ParseSoon; }

  private:
    bool ParseSoon;  // Are we planning to parse these shortly after creation?
  };

  /// Contains the lexed tokens of a member function definition
  /// which needs to be parsed at the end of the class declaration
  /// after parsing all other member declarations.
  struct LexedMethod : public LateParsedDeclaration {
    Parser *Self;
    Decl *D;
    CachedTokens Toks;

    explicit LexedMethod(Parser *P, Decl *MD) : Self(P), D(MD) {}

    void ParseLexedMethodDefs() override;
  };

  /// LateParsedDefaultArgument - Keeps track of a parameter that may
  /// have a default argument that cannot be parsed yet because it
  /// occurs within a member function declaration inside the class
  /// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
    explicit LateParsedDefaultArgument(
        Decl *P, std::unique_ptr<CachedTokens> Toks = nullptr)
        : Param(P), Toks(std::move(Toks)) { }

    /// Param - The parameter declaration for this parameter.
    Decl *Param;

    /// Toks - The sequence of tokens that comprises the default
    /// argument expression, not including the '=' or the terminating
    /// ')' or ','. This will be NULL for parameters that have no
    /// default argument.
    std::unique_ptr<CachedTokens> Toks;
  };

  /// LateParsedMethodDeclaration - A method declaration inside a class that
  /// contains at least one entity whose parsing needs to be delayed
  /// until the class itself is completely defined, such as a default
  /// argument (C++ [class.mem]p2).
  struct LateParsedMethodDeclaration : public LateParsedDeclaration {
    explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
        : Self(P), Method(M), ExceptionSpecTokens(nullptr) {}

    void ParseLexedMethodDeclarations() override;

    Parser *Self;

    /// Method - The method declaration.
    Decl *Method;

    /// DefaultArgs - Contains the parameters of the function and
    /// their default arguments. At least one of the parameters will
    /// have a default argument, but all of the parameters of the
    /// method will be stored so that they can be reintroduced into
    /// scope at the appropriate times.
    SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;

    /// The set of tokens that make up an exception-specification that
    /// has not yet been parsed.
    CachedTokens *ExceptionSpecTokens;
  };

  /// LateParsedMemberInitializer - An initializer for a non-static class data
  /// member whose parsing must be delayed until the class is completely
  /// defined (C++11 [class.mem]p2).
  struct LateParsedMemberInitializer : public LateParsedDeclaration {
    LateParsedMemberInitializer(Parser *P, Decl *FD)
      : Self(P), Field(FD) { }

    void ParseLexedMemberInitializers() override;

    Parser *Self;

    /// Field - The field declaration.
Decl *Field; /// CachedTokens - The sequence of tokens that comprises the initializer, /// including any leading '='. CachedTokens Toks; }; /// LateParsedDeclarationsContainer - During parsing of a top (non-nested) /// C++ class, its method declarations that contain parts that won't be /// parsed until after the definition is completed (C++ [class.mem]p2), /// the method declarations and possibly attached inline definitions /// will be stored here with the tokens that will be parsed to create those /// entities. typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer; /// Representation of a class that has been parsed, including /// any member function declarations or definitions that need to be /// parsed after the corresponding top-level class is complete. struct ParsingClass { ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface) : TopLevelClass(TopLevelClass), IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) {} /// Whether this is a "top-level" class, meaning that it is /// not nested within another class. bool TopLevelClass : 1; /// Whether this class is an __interface. bool IsInterface : 1; /// The class or class template whose definition we are parsing. Decl *TagOrTemplate; /// LateParsedDeclarations - Method declarations, inline definitions and /// nested classes that contain pieces whose parsing will be delayed until /// the top-level class is fully defined. LateParsedDeclarationsContainer LateParsedDeclarations; }; /// The stack of classes that is currently being /// parsed. Nested and local classes will be pushed onto this stack /// when they are parsed, and removed afterward. std::stack<ParsingClass *> ClassStack; ParsingClass &getCurrentClass() { assert(!ClassStack.empty() && "No lexed method stacks!"); return *ClassStack.top(); } /// RAII object used to manage the parsing of a class definition. 
class ParsingClassDefinition { Parser &P; bool Popped; Sema::ParsingClassState State; public: ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface) : P(P), Popped(false), State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) { } /// Pop this class of the stack. void Pop() { assert(!Popped && "Nested class has already been popped"); Popped = true; P.PopParsingClass(State); } ~ParsingClassDefinition() { if (!Popped) P.PopParsingClass(State); } }; /// Contains information about any template-specific /// information that has been parsed prior to parsing declaration /// specifiers. struct ParsedTemplateInfo { ParsedTemplateInfo() : Kind(NonTemplate), TemplateParams(nullptr) {} ParsedTemplateInfo(TemplateParameterLists *TemplateParams, bool isSpecialization, bool lastParameterListWasEmpty = false) : Kind(isSpecialization? ExplicitSpecialization : Template), TemplateParams(TemplateParams), LastParameterListWasEmpty(lastParameterListWasEmpty) { } explicit ParsedTemplateInfo(SourceLocation ExternLoc, SourceLocation TemplateLoc) : Kind(ExplicitInstantiation), TemplateParams(nullptr), ExternLoc(ExternLoc), TemplateLoc(TemplateLoc), LastParameterListWasEmpty(false){ } /// The kind of template we are parsing. enum { /// We are not parsing a template at all. NonTemplate = 0, /// We are parsing a template declaration. Template, /// We are parsing an explicit specialization. ExplicitSpecialization, /// We are parsing an explicit instantiation. ExplicitInstantiation } Kind; /// The template parameter lists, for template declarations /// and explicit specializations. TemplateParameterLists *TemplateParams; /// The location of the 'extern' keyword, if any, for an explicit /// instantiation SourceLocation ExternLoc; /// The location of the 'template' keyword, for an explicit /// instantiation. SourceLocation TemplateLoc; /// Whether the last template parameter list was empty. 
bool LastParameterListWasEmpty; SourceRange getSourceRange() const LLVM_READONLY; }; // In ParseCXXInlineMethods.cpp. struct ReenterTemplateScopeRAII; struct ReenterClassScopeRAII; void LexTemplateFunctionForLateParsing(CachedTokens &Toks); void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT); static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT); Sema::ParsingClassState PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface); void DeallocateParsedClasses(ParsingClass *Class); void PopParsingClass(Sema::ParsingClassState); enum CachedInitKind { CIK_DefaultArgument, CIK_DefaultInitializer }; NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS, ParsedAttributes &AccessAttrs, ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo, const VirtSpecifiers &VS, SourceLocation PureSpecLoc); void ParseCXXNonStaticMemberInitializer(Decl *VarD); void ParseLexedAttributes(ParsingClass &Class); void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D, bool EnterScope, bool OnDefinition); void ParseLexedAttribute(LateParsedAttribute &LA, bool EnterScope, bool OnDefinition); void ParseLexedMethodDeclarations(ParsingClass &Class); void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM); void ParseLexedMethodDefs(ParsingClass &Class); void ParseLexedMethodDef(LexedMethod &LM); void ParseLexedMemberInitializers(ParsingClass &Class); void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI); void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod); void ParseLexedPragmas(ParsingClass &Class); void ParseLexedPragma(LateParsedPragma &LP); bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks); bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK); bool ConsumeAndStoreConditional(CachedTokens &Toks); bool ConsumeAndStoreUntil(tok::TokenKind T1, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true) { return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, 
ConsumeFinalToken); } bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true); //===--------------------------------------------------------------------===// // C99 6.9: External Definitions. DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr); bool isDeclarationAfterDeclarator(); bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator); DeclGroupPtrTy ParseDeclarationOrFunctionDefinition( ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr, AccessSpecifier AS = AS_none); DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs, ParsingDeclSpec &DS, AccessSpecifier AS); void SkipFunctionBody(); Decl *ParseFunctionDefinition(ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), LateParsedAttrList *LateParsedAttrs = nullptr); void ParseKNRParamDeclarations(Declarator &D); // EndLoc is filled with the location of the last token of the simple-asm. 
ExprResult ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc); ExprResult ParseAsmStringLiteral(bool ForAsmLabel); // Objective-C External Declarations void MaybeSkipAttributes(tok::ObjCKeywordKind Kind); DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs); DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc); Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc, ParsedAttributes &prefixAttrs); class ObjCTypeParamListScope; ObjCTypeParamList *parseObjCTypeParamList(); ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs( ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc, SmallVectorImpl<IdentifierLocPair> &protocolIdents, SourceLocation &rAngleLoc, bool mayBeProtocolList = true); void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc, BalancedDelimiterTracker &T, SmallVectorImpl<Decl *> &AllIvarDecls, bool RBraceMissing); void ParseObjCClassInstanceVariables(Decl *interfaceDecl, tok::ObjCKeywordKind visibility, SourceLocation atLoc); bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P, SmallVectorImpl<SourceLocation> &PLocs, bool WarnOnDeclarations, bool ForObjCContainer, SourceLocation &LAngleLoc, SourceLocation &EndProtoLoc, bool consumeLastToken); /// Parse the first angle-bracket-delimited clause for an /// Objective-C object or object pointer type, which may be either /// type arguments or protocol qualifiers. void parseObjCTypeArgsOrProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken, bool warnOnIncompleteProtocols); /// Parse either Objective-C type arguments or protocol qualifiers; if the /// former, also parse protocol qualifiers afterward. 
void parseObjCTypeArgsAndProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken); /// Parse a protocol qualifier type such as '<NSCopying>', which is /// an anachronistic way of writing 'id<NSCopying>'. TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc); /// Parse Objective-C type arguments and protocol qualifiers, extending the /// current type with the parsed result. TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc, ParsedType type, bool consumeLastToken, SourceLocation &endLoc); void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey, Decl *CDecl); DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc, ParsedAttributes &prefixAttrs); struct ObjCImplParsingDataRAII { Parser &P; Decl *Dcl; bool HasCFunction; typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer; LateParsedObjCMethodContainer LateParsedObjCMethods; ObjCImplParsingDataRAII(Parser &parser, Decl *D) : P(parser), Dcl(D), HasCFunction(false) { P.CurParsedObjCImpl = this; Finished = false; } ~ObjCImplParsingDataRAII(); void finish(SourceRange AtEnd); bool isFinished() const { return Finished; } private: bool Finished; }; ObjCImplParsingDataRAII *CurParsedObjCImpl; void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl); DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc, ParsedAttributes &Attrs); DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd); Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc); Decl *ParseObjCPropertySynthesize(SourceLocation atLoc); Decl *ParseObjCPropertyDynamic(SourceLocation atLoc); IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation); // Definitions for Objective-c context sensitive keywords 
recognition. enum ObjCTypeQual { objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref, objc_nonnull, objc_nullable, objc_null_unspecified, objc_NumQuals }; IdentifierInfo *ObjCTypeQuals[objc_NumQuals]; bool isTokIdentifier_in() const; ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx, ParsedAttributes *ParamAttrs); Decl *ParseObjCMethodPrototype( tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition = true); Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType, tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition=true); void ParseObjCPropertyAttribute(ObjCDeclSpec &DS); Decl *ParseObjCMethodDefinition(); public: //===--------------------------------------------------------------------===// // C99 6.5: Expressions. /// TypeCastState - State whether an expression is or may be a type cast. enum TypeCastState { NotTypeCast = 0, MaybeTypeCast, IsTypeCast }; ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstantExpressionInExprEvalContext( TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseCaseExpression(SourceLocation CaseLoc); ExprResult ParseConstraintExpression(); ExprResult ParseConstraintLogicalAndExpression(bool IsTrailingRequiresClause); ExprResult ParseConstraintLogicalOrExpression(bool IsTrailingRequiresClause); // Expr that doesn't include commas. 
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks, unsigned &NumLineToksConsumed, bool IsUnevaluated); ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false); private: ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc); ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc); ExprResult ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec); /// Control what ParseCastExpression will parse. enum CastParseKind { AnyCastExpr = 0, UnaryExprOnly, PrimaryExprOnly }; ExprResult ParseCastExpression(CastParseKind ParseKind, bool isAddressOfOperand, bool &NotCastExpr, TypeCastState isTypeCast, bool isVectorLiteral = false, bool *NotPrimaryExpression = nullptr); ExprResult ParseCastExpression(CastParseKind ParseKind, bool isAddressOfOperand = false, TypeCastState isTypeCast = NotTypeCast, bool isVectorLiteral = false, bool *NotPrimaryExpression = nullptr); /// Returns true if the next token cannot start an expression. bool isNotExpressionStart(); /// Returns true if the next token would start a postfix-expression /// suffix. 
bool isPostfixExpressionSuffixStart() { tok::TokenKind K = Tok.getKind(); return (K == tok::l_square || K == tok::l_paren || K == tok::period || K == tok::arrow || K == tok::plusplus || K == tok::minusminus); } bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less); void checkPotentialAngleBracket(ExprResult &PotentialTemplateName); bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &, const Token &OpToken); bool checkPotentialAngleBracketDelimiter(const Token &OpToken) { if (auto *Info = AngleBrackets.getCurrent(*this)) return checkPotentialAngleBracketDelimiter(*Info, OpToken); return false; } ExprResult ParsePostfixExpressionSuffix(ExprResult LHS); ExprResult ParseUnaryExprOrTypeTraitExpression(); ExprResult ParseBuiltinPrimaryExpression(); ExprResult ParseSYCLUniqueStableNameExpression(); ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok, bool &isCastExpr, ParsedType &CastTy, SourceRange &CastRange); typedef SmallVector<SourceLocation, 20> CommaLocsTy; /// ParseExpressionList - Used for C/C++ (argument-)expression-list. bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs, llvm::function_ref<void()> ExpressionStarts = llvm::function_ref<void()>()); /// ParseSimpleExpressionList - A simple comma-separated list of expressions, /// used for misc language extensions. bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs); /// ParenParseOption - Control what ParseParenExpression will parse. enum ParenParseOption { SimpleExpr, // Only parse '(' expression ')' FoldExpr, // Also allow fold-expression <anything> CompoundStmt, // Also allow '(' compound-statement ')' CompoundLiteral, // Also allow '(' type-name ')' '{' ... 
'}' CastExpr // Also allow '(' type-name ')' <anything> }; ExprResult ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr, bool isTypeCast, ParsedType &CastTy, SourceLocation &RParenLoc); ExprResult ParseCXXAmbiguousParenExpression( ParenParseOption &ExprType, ParsedType &CastTy, BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt); ExprResult ParseCompoundLiteralExpression(ParsedType Ty, SourceLocation LParenLoc, SourceLocation RParenLoc); ExprResult ParseGenericSelectionExpression(); ExprResult ParseObjCBoolLiteral(); ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T); //===--------------------------------------------------------------------===// // C++ Expressions ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand, Token &Replacement); ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false); bool areTokensAdjacent(const Token &A, const Token &B); void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr, bool EnteringContext, IdentifierInfo &II, CXXScopeSpec &SS); bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHasErrors, bool EnteringContext, bool *MayBePseudoDestructor = nullptr, bool IsTypename = false, IdentifierInfo **LastII = nullptr, bool OnlyNamespace = false, bool InUsingDeclaration = false); //===--------------------------------------------------------------------===// // C++11 5.1.2: Lambda expressions /// Result of tentatively parsing a lambda-introducer. enum class LambdaIntroducerTentativeParse { /// This appears to be a lambda-introducer, which has been fully parsed. Success, /// This is a lambda-introducer, but has not been fully parsed, and this /// function needs to be called again to parse it. Incomplete, /// This is definitely an Objective-C message send expression, rather than /// a lambda-introducer, attribute-specifier, or array designator. MessageSend, /// This is not a lambda-introducer. 
Invalid, }; // [...] () -> type {...} ExprResult ParseLambdaExpression(); ExprResult TryParseLambdaExpression(); bool ParseLambdaIntroducer(LambdaIntroducer &Intro, LambdaIntroducerTentativeParse *Tentative = nullptr); ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Casts ExprResult ParseCXXCasts(); /// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast. ExprResult ParseBuiltinBitCast(); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Type Identification ExprResult ParseCXXTypeid(); //===--------------------------------------------------------------------===// // C++ : Microsoft __uuidof Expression ExprResult ParseCXXUuidof(); //===--------------------------------------------------------------------===// // C++ 5.2.4: C++ Pseudo-Destructor Expressions ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, ParsedType ObjectType); //===--------------------------------------------------------------------===// // C++ 9.3.2: C++ 'this' pointer ExprResult ParseCXXThis(); //===--------------------------------------------------------------------===// // C++ 15: C++ Throw Expression ExprResult ParseThrowExpression(); ExceptionSpecificationType tryParseExceptionSpecification( bool Delayed, SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &DynamicExceptions, SmallVectorImpl<SourceRange> &DynamicExceptionRanges, ExprResult &NoexceptExpr, CachedTokens *&ExceptionSpecTokens); // EndLoc is filled with the location of the last token of the specification. 
ExceptionSpecificationType ParseDynamicExceptionSpecification( SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &Exceptions, SmallVectorImpl<SourceRange> &Ranges); //===--------------------------------------------------------------------===// // C++0x 8: Function declaration trailing-return-type TypeResult ParseTrailingReturnType(SourceRange &Range, bool MayBeFollowedByDirectInit); //===--------------------------------------------------------------------===// // C++ 2.13.5: C++ Boolean Literals ExprResult ParseCXXBoolLiteral(); //===--------------------------------------------------------------------===// // C++ 5.2.3: Explicit type conversion (functional notation) ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS); /// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers. /// This should only be called when the current token is known to be part of /// simple-type-specifier. void ParseCXXSimpleTypeSpecifier(DeclSpec &DS); bool ParseCXXTypeSpecifierSeq(DeclSpec &DS); //===--------------------------------------------------------------------===// // C++ 5.3.4 and 5.3.5: C++ new and delete bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs, Declarator &D); void ParseDirectNewDeclarator(Declarator &D); ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start); ExprResult ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start); //===--------------------------------------------------------------------===// // C++ if/switch/while/for condition expression. 
struct ForRangeInfo; Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt, SourceLocation Loc, Sema::ConditionKind CK, bool MissingOK, ForRangeInfo *FRI = nullptr, bool EnterForConditionScope = false); DeclGroupPtrTy ParseAliasDeclarationInInitStatement(DeclaratorContext Context, ParsedAttributesWithRange &Attrs); //===--------------------------------------------------------------------===// // C++ Coroutines ExprResult ParseCoyieldExpression(); //===--------------------------------------------------------------------===// // C++ Concepts ExprResult ParseRequiresExpression(); void ParseTrailingRequiresClause(Declarator &D); //===--------------------------------------------------------------------===// // C99 6.7.8: Initialization. /// ParseInitializer /// initializer: [C99 6.7.8] /// assignment-expression /// '{' ... ExprResult ParseInitializer() { if (Tok.isNot(tok::l_brace)) return ParseAssignmentExpression(); return ParseBraceInitializer(); } bool MayBeDesignationStart(); ExprResult ParseBraceInitializer(); struct DesignatorCompletionInfo { SmallVectorImpl<Expr *> &InitExprs; QualType PreferredBaseType; }; ExprResult ParseInitializerWithPotentialDesignator(DesignatorCompletionInfo); //===--------------------------------------------------------------------===// // clang Expressions ExprResult ParseBlockLiteralExpression(); // ^{...} //===--------------------------------------------------------------------===// // Objective-C Expressions ExprResult ParseObjCAtExpression(SourceLocation AtLocation); ExprResult ParseObjCStringLiteral(SourceLocation AtLoc); ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc); ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc); ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue); ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc); ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc); ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc); ExprResult 
ParseObjCEncodeExpression(SourceLocation AtLoc); ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc); ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc); bool isSimpleObjCMessageExpression(); ExprResult ParseObjCMessageExpression(); ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); ExprResult ParseAssignmentExprWithObjCMessageExprStart( SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr); //===--------------------------------------------------------------------===// // C99 6.8: Statements and Blocks. /// A SmallVector of statements, with stack size 32 (as that is the only one /// used.) typedef SmallVector<Stmt*, 32> StmtVector; /// A SmallVector of expressions, with stack size 12 (the maximum used.) typedef SmallVector<Expr*, 12> ExprVector; /// A SmallVector of types. typedef SmallVector<ParsedType, 12> TypeVector; StmtResult ParseStatement(SourceLocation *TrailingElseLoc = nullptr, ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt); StmtResult ParseStatementOrDeclaration( StmtVector &Stmts, ParsedStmtContext StmtCtx, SourceLocation *TrailingElseLoc = nullptr); StmtResult ParseStatementOrDeclarationAfterAttributes( StmtVector &Stmts, ParsedStmtContext StmtCtx, SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs); StmtResult ParseExprStatement(ParsedStmtContext StmtCtx); StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs, ParsedStmtContext StmtCtx); StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx, bool MissingCase = false, ExprResult Expr = ExprResult()); StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx); StmtResult ParseCompoundStatement(bool isStmtExpr = false); StmtResult ParseCompoundStatement(bool isStmtExpr, unsigned ScopeFlags); void ParseCompoundStatementLeadingPragmas(); bool 
ConsumeNullStmt(StmtVector &Stmts); StmtResult ParseCompoundStatementBody(bool isStmtExpr = false); bool ParseParenExprOrCondition(StmtResult *InitStmt, Sema::ConditionResult &CondResult, SourceLocation Loc, Sema::ConditionKind CK, bool MissingOK, SourceLocation *LParenLoc, SourceLocation *RParenLoc); StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc); StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc); StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc); StmtResult ParseDoStatement(); StmtResult ParseForStatement(SourceLocation *TrailingElseLoc); StmtResult ParseGotoStatement(); StmtResult ParseContinueStatement(); StmtResult ParseBreakStatement(); StmtResult ParseReturnStatement(); StmtResult ParseAsmStatement(bool &msAsm); StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc); StmtResult ParsePragmaLoopHint(StmtVector &Stmts, ParsedStmtContext StmtCtx, SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs); /// Describes the behavior that should be taken for an __if_exists /// block. enum IfExistsBehavior { /// Parse the block; this code is always used. IEB_Parse, /// Skip the block entirely; this code is never used. IEB_Skip, /// Parse the block as a dependent block, which may be used in /// some template instantiations but not others. IEB_Dependent }; /// Describes the condition of a Microsoft __if_exists or /// __if_not_exists block. struct IfExistsCondition { /// The location of the initial keyword. SourceLocation KeywordLoc; /// Whether this is an __if_exists block (rather than an /// __if_not_exists block). bool IsIfExists; /// Nested-name-specifier preceding the name. CXXScopeSpec SS; /// The name we're looking for. UnqualifiedId Name; /// The behavior of this __if_exists or __if_not_exists block /// should. 
IfExistsBehavior Behavior; }; bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result); void ParseMicrosoftIfExistsStatement(StmtVector &Stmts); void ParseMicrosoftIfExistsExternalDeclaration(); void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType, ParsedAttributes &AccessAttrs, AccessSpecifier &CurAS); bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs, bool &InitExprsOk); bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names, SmallVectorImpl<Expr *> &Constraints, SmallVectorImpl<Expr *> &Exprs); //===--------------------------------------------------------------------===// // C++ 6: Statements and Blocks StmtResult ParseCXXTryBlock(); StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false); StmtResult ParseCXXCatchBlock(bool FnCatch = false); //===--------------------------------------------------------------------===// // MS: SEH Statements and Blocks StmtResult ParseSEHTryBlock(); StmtResult ParseSEHExceptBlock(SourceLocation Loc); StmtResult ParseSEHFinallyBlock(SourceLocation Loc); StmtResult ParseSEHLeaveStatement(); //===--------------------------------------------------------------------===// // Objective-C Statements StmtResult ParseObjCAtStatement(SourceLocation atLoc, ParsedStmtContext StmtCtx); StmtResult ParseObjCTryStmt(SourceLocation atLoc); StmtResult ParseObjCThrowStmt(SourceLocation atLoc); StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc); StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc); //===--------------------------------------------------------------------===// // C99 6.7: Declarations. /// A context for parsing declaration specifiers. TODO: flesh this /// out, there are other significant restrictions on specifiers than /// would be best implemented in the parser. 
enum class DeclSpecContext { DSC_normal, // normal context DSC_class, // class context, enables 'friend' DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list DSC_trailing, // C++11 trailing-type-specifier in a trailing return type DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration DSC_top_level, // top-level/namespace declaration context DSC_template_param, // template parameter context DSC_template_type_arg, // template type argument context DSC_objc_method_result, // ObjC method result context, enables 'instancetype' DSC_condition // condition declaration context }; /// Is this a context in which we are parsing just a type-specifier (or /// trailing-type-specifier)? static bool isTypeSpecifier(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_condition: return false; case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: case DeclSpecContext::DSC_trailing: case DeclSpecContext::DSC_alias_declaration: return true; } llvm_unreachable("Missing DeclSpecContext case"); } /// Whether a defining-type-specifier is permitted in a given context. enum class AllowDefiningTypeSpec { /// The grammar doesn't allow a defining-type-specifier here, and we must /// not parse one (eg, because a '{' could mean something else). No, /// The grammar doesn't allow a defining-type-specifier here, but we permit /// one for error recovery purposes. Sema will reject. NoButErrorRecovery, /// The grammar allows a defining-type-specifier here, even though it's /// always invalid. Sema will reject. YesButInvalid, /// The grammar allows a defining-type-specifier here, and one can be valid. 
Yes }; /// Is this a context in which we are parsing defining-type-specifiers (and /// so permit class and enum definitions in addition to non-defining class and /// enum elaborated-type-specifiers)? static AllowDefiningTypeSpec isDefiningTypeSpecifierContext(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_alias_declaration: case DeclSpecContext::DSC_objc_method_result: return AllowDefiningTypeSpec::Yes; case DeclSpecContext::DSC_condition: case DeclSpecContext::DSC_template_param: return AllowDefiningTypeSpec::YesButInvalid; case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: return AllowDefiningTypeSpec::NoButErrorRecovery; case DeclSpecContext::DSC_trailing: return AllowDefiningTypeSpec::No; } llvm_unreachable("Missing DeclSpecContext case"); } /// Is this a context in which an opaque-enum-declaration can appear? static bool isOpaqueEnumDeclarationContext(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: return true; case DeclSpecContext::DSC_alias_declaration: case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_condition: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_type_specifier: case DeclSpecContext::DSC_trailing: return false; } llvm_unreachable("Missing DeclSpecContext case"); } /// Is this a context in which we can perform class template argument /// deduction? 
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_condition: case DeclSpecContext::DSC_type_specifier: return true; case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_trailing: case DeclSpecContext::DSC_alias_declaration: return false; } llvm_unreachable("Missing DeclSpecContext case"); } /// Information on a C++0x for-range-initializer found while parsing a /// declaration which turns out to be a for-range-declaration. struct ForRangeInit { SourceLocation ColonLoc; ExprResult RangeExpr; bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); } }; struct ForRangeInfo : ForRangeInit { StmtResult LoopVar; }; DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs, SourceLocation *DeclSpecStart = nullptr); DeclGroupPtrTy ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs, bool RequireSemi, ForRangeInit *FRI = nullptr, SourceLocation *DeclSpecStart = nullptr); bool MightBeDeclarator(DeclaratorContext Context); DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context, SourceLocation *DeclEnd = nullptr, ForRangeInit *FRI = nullptr); Decl *ParseDeclarationAfterDeclarator(Declarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo()); bool ParseAsmAttributesAfterDeclarator(Declarator &D); Decl *ParseDeclarationAfterDeclaratorAndAttributes( Declarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), ForRangeInit *FRI = nullptr); Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope); Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope); /// When in code-completion, skip parsing of the function/method body /// unless 
the body contains the code-completion point. /// /// \returns true if the function body was skipped. bool trySkippingFunctionBody(); bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, DeclSpecContext DSC, ParsedAttributesWithRange &Attrs); DeclSpecContext getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context); void ParseDeclarationSpecifiers( DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), AccessSpecifier AS = AS_none, DeclSpecContext DSC = DeclSpecContext::DSC_normal, LateParsedAttrList *LateAttrs = nullptr); bool DiagnoseMissingSemiAfterTagDefinition( DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext, LateParsedAttrList *LateAttrs = nullptr); void ParseSpecifierQualifierList( DeclSpec &DS, AccessSpecifier AS = AS_none, DeclSpecContext DSC = DeclSpecContext::DSC_normal); void ParseObjCTypeQualifierList(ObjCDeclSpec &DS, DeclaratorContext Context); void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, DeclSpecContext DSC); void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl); void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType, RecordDecl *TagDecl); void ParseStructDeclaration( ParsingDeclSpec &DS, llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback); bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false); bool isTypeSpecifierQualifier(); /// isKnownToBeTypeSpecifier - Return true if we know that the specified token /// is definitely a type-specifier. Return false if it isn't part of a type /// specifier or if we're not sure. bool isKnownToBeTypeSpecifier(const Token &Tok) const; /// Return true if we know that we are definitely looking at a /// decl-specifier, and isn't part of an expression such as a function-style /// cast. Return false if it's no a decl-specifier, or we're not sure. 
bool isKnownToBeDeclarationSpecifier() { if (getLangOpts().CPlusPlus) return isCXXDeclarationSpecifier() == TPResult::True; return isDeclarationSpecifier(true); } /// isDeclarationStatement - Disambiguates between a declaration or an /// expression statement, when parsing function bodies. /// Returns true for declaration, false for expression. bool isDeclarationStatement() { if (getLangOpts().CPlusPlus) return isCXXDeclarationStatement(); return isDeclarationSpecifier(true); } /// isForInitDeclaration - Disambiguates between a declaration or an /// expression in the context of the C 'clause-1' or the C++ // 'for-init-statement' part of a 'for' statement. /// Returns true for declaration, false for expression. bool isForInitDeclaration() { if (getLangOpts().OpenMP) Actions.startOpenMPLoop(); if (getLangOpts().CPlusPlus) return Tok.is(tok::kw_using) || isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true); return isDeclarationSpecifier(true); } /// Determine whether this is a C++1z for-range-identifier. bool isForRangeIdentifier(); /// Determine whether we are currently at the start of an Objective-C /// class message that appears to be missing the open bracket '['. bool isStartOfObjCClassMessageMissingOpenBracket(); /// Starting with a scope specifier, identifier, or /// template-id that refers to the current class, determine whether /// this is a constructor declarator. bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false); /// Specifies the context in which type-id/expression /// disambiguation will occur. enum TentativeCXXTypeIdContext { TypeIdInParens, TypeIdUnambiguous, TypeIdAsTemplateArgument }; /// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know /// whether the parens contain an expression or a type-id. /// Returns true for a type-id and false for an expression. 
bool isTypeIdInParens(bool &isAmbiguous) { if (getLangOpts().CPlusPlus) return isCXXTypeId(TypeIdInParens, isAmbiguous); isAmbiguous = false; return isTypeSpecifierQualifier(); } bool isTypeIdInParens() { bool isAmbiguous; return isTypeIdInParens(isAmbiguous); } /// Checks if the current tokens form type-id or expression. /// It is similar to isTypeIdInParens but does not suppose that type-id /// is in parenthesis. bool isTypeIdUnambiguously() { bool IsAmbiguous; if (getLangOpts().CPlusPlus) return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous); return isTypeSpecifierQualifier(); } /// isCXXDeclarationStatement - C++-specialized function that disambiguates /// between a declaration or an expression statement, when parsing function /// bodies. Returns true for declaration, false for expression. bool isCXXDeclarationStatement(); /// isCXXSimpleDeclaration - C++-specialized function that disambiguates /// between a simple-declaration or an expression-statement. /// If during the disambiguation process a parsing error is encountered, /// the function returns true to let the declaration parsing code handle it. /// Returns false if the statement is disambiguated as expression. bool isCXXSimpleDeclaration(bool AllowForRangeDecl); /// isCXXFunctionDeclarator - Disambiguates between a function declarator or /// a constructor-style initializer, when parsing declaration statements. /// Returns true for function declarator and false for constructor-style /// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration /// might be a constructor-style initializer. /// If during the disambiguation process a parsing error is encountered, /// the function returns true to let the declaration parsing code handle it. bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr); struct ConditionDeclarationOrInitStatementState; enum class ConditionOrInitStatement { Expression, ///< Disambiguated as an expression (either kind). 
ConditionDecl, ///< Disambiguated as the declaration form of condition. InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement. ForRangeDecl, ///< Disambiguated as a for-range declaration. Error ///< Can't be any of the above! }; /// Disambiguates between the different kinds of things that can happen /// after 'if (' or 'switch ('. This could be one of two different kinds of /// declaration (depending on whether there is a ';' later) or an expression. ConditionOrInitStatement isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt, bool CanBeForRangeDecl); bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous); bool isCXXTypeId(TentativeCXXTypeIdContext Context) { bool isAmbiguous; return isCXXTypeId(Context, isAmbiguous); } /// TPResult - Used as the result value for functions whose purpose is to /// disambiguate C++ constructs by "tentatively parsing" them. enum class TPResult { True, False, Ambiguous, Error }; /// Determine whether we could have an enum-base. /// /// \p AllowSemi If \c true, then allow a ';' after the enum-base; otherwise /// only consider this to be an enum-base if the next token is a '{'. /// /// \return \c false if this cannot possibly be an enum base; \c true /// otherwise. bool isEnumBase(bool AllowSemi); /// isCXXDeclarationSpecifier - Returns TPResult::True if it is a /// declaration specifier, TPResult::False if it is not, /// TPResult::Ambiguous if it could be either a decl-specifier or a /// function-style cast, and TPResult::Error if a parsing error was /// encountered. If it could be a braced C++11 function-style cast, returns /// BracedCastResult. /// Doesn't consume tokens. TPResult isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False, bool *InvalidAsDeclSpec = nullptr); /// Given that isCXXDeclarationSpecifier returns \c TPResult::True or /// \c TPResult::Ambiguous, determine whether the decl-specifier would be /// a type-specifier other than a cv-qualifier. 
bool isCXXDeclarationSpecifierAType(); /// Determine whether the current token sequence might be /// '<' template-argument-list '>' /// rather than a less-than expression. TPResult isTemplateArgumentList(unsigned TokensToSkip); /// Determine whether an '(' after an 'explicit' keyword is part of a C++20 /// 'explicit(bool)' declaration, in earlier language modes where that is an /// extension. TPResult isExplicitBool(); /// Determine whether an identifier has been tentatively declared as a /// non-type. Such tentative declarations should not be found to name a type /// during a tentative parse, but also should not be annotated as a non-type. bool isTentativelyDeclared(IdentifierInfo *II); // "Tentative parsing" functions, used for disambiguation. If a parsing error // is encountered they will return TPResult::Error. // Returning TPResult::True/False indicates that the ambiguity was // resolved and tentative parsing may stop. TPResult::Ambiguous indicates // that more tentative parsing is necessary for disambiguation. // They all consume tokens, so backtracking should be used after calling them. TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl); TPResult TryParseTypeofSpecifier(); TPResult TryParseProtocolQualifiers(); TPResult TryParsePtrOperatorSeq(); TPResult TryParseOperatorId(); TPResult TryParseInitDeclaratorList(); TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true, bool mayHaveDirectInit = false); TPResult TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr, bool VersusTemplateArg = false); TPResult TryParseFunctionDeclarator(); TPResult TryParseBracketDeclarator(); TPResult TryConsumeDeclarationSpecifier(); /// Try to skip a possibly empty sequence of 'attribute-specifier's without /// full validation of the syntactic structure of attributes. bool TrySkipAttributes(); /// Diagnoses use of _ExtInt as being deprecated, and diagnoses use of /// _BitInt as an extension when appropriate. 
  void DiagnoseBitIntUse(const Token &Tok);

public:
  TypeResult
  ParseTypeName(SourceRange *Range = nullptr,
                DeclaratorContext Context = DeclaratorContext::TypeName,
                AccessSpecifier AS = AS_none, Decl **OwnedType = nullptr,
                ParsedAttributes *Attrs = nullptr);

private:
  void ParseBlockId(SourceLocation CaretLoc);

  /// Are [[]] attributes enabled?
  bool standardAttributesAllowed() const {
    const LangOptions &LO = getLangOpts();
    return LO.DoubleSquareBracketAttributes;
  }

  // Check for the start of an attribute-specifier-seq in a context where an
  // attribute is not allowed. Only diagnoses when the current token is '['
  // and the next token is also '[' (i.e. the '[[' introducer).
  bool CheckProhibitedCXX11Attribute() {
    assert(Tok.is(tok::l_square));
    if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square))
      return false;
    return DiagnoseProhibitedCXX11Attribute();
  }

  // Emits the diagnostic for an attribute-specifier-seq found via
  // CheckProhibitedCXX11Attribute(); result is forwarded to the caller.
  bool DiagnoseProhibitedCXX11Attribute();

  // Diagnose attributes (introduced by '[[' or 'alignas') that appear at the
  // current location but belong at CorrectLocation.
  void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                    SourceLocation CorrectLocation) {
    if (!standardAttributesAllowed())
      return;
    if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
        Tok.isNot(tok::kw_alignas))
      return;
    DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
  }
  void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                       SourceLocation CorrectLocation);

  void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
                                      DeclSpec &DS, Sema::TagUseKind TUK);

  // Diagnose and drop attributes that are not permitted here.
  // FixItLoc = possible correct location for the attributes
  void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
                          SourceLocation FixItLoc = SourceLocation()) {
    if (Attrs.Range.isInvalid())
      return;
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clear();
  }

  void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
                          SourceLocation FixItLoc = SourceLocation()) {
    if (Attrs.Range.isInvalid())
      return;
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    // A view does not own the attributes, so only detach them from the list.
    Attrs.clearListOnly();
  }

  void DiagnoseProhibitedAttributes(const SourceRange &Range,
                                    SourceLocation FixItLoc);

  // Forbid C++11 and C2x attributes that appear in certain syntactic
  // locations which the standard permits but we do not support yet — for
  // example, attributes that appertain to decl specifiers.
  void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
                               unsigned DiagID,
                               bool DiagnoseEmptyAttrs = false);

  /// Skip C++11 and C2x attributes and return the end location of the
  /// last one.
  /// \returns SourceLocation() if there are no attributes.
  SourceLocation SkipCXX11Attributes();

  /// Diagnose and skip C++11 and C2x attributes that appear in syntactic
  /// locations where attributes are not allowed.
  void DiagnoseAndSkipCXX11Attributes();

  /// Emit warnings for C++11 and C2x attributes that are in a position that
  /// clang accepts as an extension.
  void DiagnoseCXX11AttributeExtension(ParsedAttributesWithRange &Attrs);

  /// Parses syntax-generic attribute arguments for attributes which are
  /// known to the implementation, and adds them to the given ParsedAttributes
  /// list with the given attribute syntax. Returns the number of arguments
  /// parsed for the attribute.
  unsigned
  ParseAttributeArgsCommon(IdentifierInfo *AttrName,
                           SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
                           SourceLocation *EndLoc, IdentifierInfo *ScopeName,
                           SourceLocation ScopeLoc,
                           ParsedAttr::Syntax Syntax);

  // Bitmask of attribute syntaxes accepted by ParseAttributes().
  enum ParseAttrKindMask {
    PAKM_GNU = 1 << 0,
    PAKM_Declspec = 1 << 1,
    PAKM_CXX11 = 1 << 2,
  };

  /// \brief Parse attributes based on what syntaxes are desired, allowing for
  /// the order to vary. e.g. with PAKM_GNU | PAKM_Declspec:
  /// __attribute__((...)) __declspec(...) __attribute__((...))
  /// Note that Microsoft attributes (spelled with single square brackets) are
  /// not supported by this because of parsing ambiguities with other
  /// constructs.
  ///
  /// There are some attribute parse orderings that should not be allowed in
  /// arbitrary order. e.g.,
  ///
  ///   [[]] __attribute__(()) int i; // OK
  ///   __attribute__(()) [[]] int i; // Not OK
  ///
  /// Such situations should use the specific attribute parsing functionality.
void ParseAttributes(unsigned WhichAttrKinds, ParsedAttributesWithRange &Attrs, SourceLocation *End = nullptr, LateParsedAttrList *LateAttrs = nullptr); void ParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs, SourceLocation *End = nullptr, LateParsedAttrList *LateAttrs = nullptr) { ParsedAttributesWithRange AttrsWithRange(AttrFactory); ParseAttributes(WhichAttrKinds, AttrsWithRange, End, LateAttrs); Attrs.takeAllFrom(AttrsWithRange); } /// \brief Possibly parse attributes based on what syntaxes are desired, /// allowing for the order to vary. bool MaybeParseAttributes(unsigned WhichAttrKinds, ParsedAttributesWithRange &Attrs, SourceLocation *End = nullptr, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.isOneOf(tok::kw___attribute, tok::kw___declspec) || (standardAttributesAllowed() && isCXX11AttributeSpecifier())) { ParseAttributes(WhichAttrKinds, Attrs, End, LateAttrs); return true; } return false; } bool MaybeParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs, SourceLocation *End = nullptr, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.isOneOf(tok::kw___attribute, tok::kw___declspec) || (standardAttributesAllowed() && isCXX11AttributeSpecifier())) { ParseAttributes(WhichAttrKinds, Attrs, End, LateAttrs); return true; } return false; } void MaybeParseGNUAttributes(Declarator &D, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) { ParsedAttributes attrs(AttrFactory); SourceLocation endLoc; ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D); D.takeAttributes(attrs, endLoc); } } /// Parses GNU-style attributes and returns them without source range /// information. /// /// This API is discouraged. Use the version that takes a /// ParsedAttributesWithRange instead. 
bool MaybeParseGNUAttributes(ParsedAttributes &Attrs, SourceLocation *EndLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) { ParsedAttributesWithRange AttrsWithRange(AttrFactory); ParseGNUAttributes(Attrs, EndLoc, LateAttrs); Attrs.takeAllFrom(AttrsWithRange); return true; } return false; } bool MaybeParseGNUAttributes(ParsedAttributesWithRange &Attrs, SourceLocation *EndLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) { ParseGNUAttributes(Attrs, EndLoc, LateAttrs); return true; } return false; } /// Parses GNU-style attributes and returns them without source range /// information. /// /// This API is discouraged. Use the version that takes a /// ParsedAttributesWithRange instead. void ParseGNUAttributes(ParsedAttributes &Attrs, SourceLocation *EndLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr, Declarator *D = nullptr) { ParsedAttributesWithRange AttrsWithRange(AttrFactory); ParseGNUAttributes(AttrsWithRange, EndLoc, LateAttrs, D); Attrs.takeAllFrom(AttrsWithRange); } void ParseGNUAttributes(ParsedAttributesWithRange &Attrs, SourceLocation *EndLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr, Declarator *D = nullptr); void ParseGNUAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax, Declarator *D); IdentifierLoc *ParseIdentifierLoc(); unsigned ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ReplayOpenMPAttributeTokens(CachedTokens &OpenMPTokens) { // If parsing the attributes found an OpenMP directive, emit those tokens // to the parse stream now. 
if (!OpenMPTokens.empty()) { PP.EnterToken(Tok, /*IsReinject*/ true); PP.EnterTokenStream(OpenMPTokens, /*DisableMacroExpansion*/ true, /*IsReinject*/ true); ConsumeAnyToken(/*ConsumeCodeCompletionTok*/ true); } } void MaybeParseCXX11Attributes(Declarator &D) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrs(AttrFactory); SourceLocation endLoc; ParseCXX11Attributes(attrs, &endLoc); D.takeAttributes(attrs, endLoc); } } bool MaybeParseCXX11Attributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrsWithRange(AttrFactory); ParseCXX11Attributes(attrsWithRange, endLoc); attrs.takeAllFrom(attrsWithRange); return true; } return false; } bool MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *endLoc = nullptr, bool OuterMightBeMessageSend = false) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier(false, OuterMightBeMessageSend)) { ParseCXX11Attributes(attrs, endLoc); return true; } return false; } void ParseOpenMPAttributeArgs(IdentifierInfo *AttrName, CachedTokens &OpenMPTokens); void ParseCXX11AttributeSpecifierInternal(ParsedAttributes &Attrs, CachedTokens &OpenMPTokens, SourceLocation *EndLoc = nullptr); void ParseCXX11AttributeSpecifier(ParsedAttributes &Attrs, SourceLocation *EndLoc = nullptr) { CachedTokens OpenMPTokens; ParseCXX11AttributeSpecifierInternal(Attrs, OpenMPTokens, EndLoc); ReplayOpenMPAttributeTokens(OpenMPTokens); } void ParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *EndLoc = nullptr); /// Parses a C++11 (or C2x)-style attribute argument list. Returns true /// if this results in adding an attribute to the ParsedAttributes list. 
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, CachedTokens &OpenMPTokens); IdentifierInfo *TryParseCXX11AttributeIdentifier( SourceLocation &Loc, Sema::AttributeCompletion Completion = Sema::AttributeCompletion::None, const IdentifierInfo *EnclosingScope = nullptr); void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square)) ParseMicrosoftAttributes(attrs, endLoc); } void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs); void ParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr); bool MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr) { const auto &LO = getLangOpts(); if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec)) { ParseMicrosoftDeclSpecs(Attrs, End); return true; } return false; } void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr); bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs); void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs); void DiagnoseAndSkipExtendedMicrosoftTypeAttributes(); SourceLocation SkipExtendedMicrosoftTypeAttributes(); void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs); void ParseBorlandTypeAttributes(ParsedAttributes &attrs); void ParseOpenCLKernelAttributes(ParsedAttributes &attrs); void ParseOpenCLQualifiers(ParsedAttributes &Attrs); void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs); VersionTuple ParseVersionTuple(SourceRange &Range); void ParseAvailabilityAttribute(IdentifierInfo &Availability, SourceLocation AvailabilityLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); Optional<AvailabilitySpec> ParseAvailabilitySpec(); 
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc); void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol, SourceLocation Loc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated, SourceLocation ObjCBridgeRelatedLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseSwiftNewTypeAttribute(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseAttributeWithTypeArg(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseTypeofSpecifier(DeclSpec &DS); SourceLocation ParseDecltypeSpecifier(DeclSpec &DS); void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS, SourceLocation StartLoc, SourceLocation EndLoc); void ParseUnderlyingTypeSpecifier(DeclSpec &DS); void ParseAtomicSpecifier(DeclSpec &DS); ExprResult ParseAlignArgument(SourceLocation Start, SourceLocation &EllipsisLoc); void ParseAlignmentSpecifier(ParsedAttributes &Attrs, SourceLocation *endLoc = nullptr); ExprResult ParseExtIntegerArgument(); void ParsePtrauthQualifier(ParsedAttributes &Attrs); VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const; VirtSpecifiers::Specifier isCXX11VirtSpecifier() const { return isCXX11VirtSpecifier(Tok); } void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface, SourceLocation 
FriendLoc); bool isCXX11FinalKeyword() const; bool isClassCompatibleKeyword() const; /// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to /// enter a new C++ declarator scope and exit it when the function is /// finished. class DeclaratorScopeObj { Parser &P; CXXScopeSpec &SS; bool EnteredScope; bool CreatedScope; public: DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss) : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {} void EnterDeclaratorScope() { assert(!EnteredScope && "Already entered the scope!"); assert(SS.isSet() && "C++ scope was not set!"); CreatedScope = true; P.EnterScope(0); // Not a decl scope. if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS)) EnteredScope = true; } ~DeclaratorScopeObj() { if (EnteredScope) { assert(SS.isSet() && "C++ scope was cleared ?"); P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS); } if (CreatedScope) P.ExitScope(); } }; /// ParseDeclarator - Parse and verify a newly-initialized declarator. void ParseDeclarator(Declarator &D); /// A function that parses a variant of direct-declarator. typedef void (Parser::*DirectDeclParseFunction)(Declarator&); void ParseDeclaratorInternal(Declarator &D, DirectDeclParseFunction DirectDeclParser); enum AttrRequirements { AR_NoAttributesParsed = 0, ///< No attributes are diagnosed. AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes. 
AR_GNUAttributesParsed = 1 << 1, AR_CXX11AttributesParsed = 1 << 2, AR_DeclspecAttributesParsed = 1 << 3, AR_AllAttributesParsed = AR_GNUAttributesParsed | AR_CXX11AttributesParsed | AR_DeclspecAttributesParsed, AR_VendorAttributesParsed = AR_GNUAttributesParsed | AR_DeclspecAttributesParsed }; void ParseTypeQualifierListOpt( DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed, bool AtomicAllowed = true, bool IdentifierRequired = false, Optional<llvm::function_ref<void()>> CodeCompletionHandler = None); void ParseDirectDeclarator(Declarator &D); void ParseDecompositionDeclarator(Declarator &D); void ParseParenDeclarator(Declarator &D); void ParseFunctionDeclarator(Declarator &D, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker, bool IsAmbiguous, bool RequiresArg = false); void InitCXXThisScopeForDeclaratorIfRelevant( const Declarator &D, const DeclSpec &DS, llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope); bool ParseRefQualifier(bool &RefQualifierIsLValueRef, SourceLocation &RefQualifierLoc); bool isFunctionDeclaratorIdentifierList(); void ParseFunctionDeclaratorIdentifierList( Declarator &D, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo); void ParseParameterDeclarationClause( DeclaratorContext DeclaratorContext, ParsedAttributes &attrs, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo, SourceLocation &EllipsisLoc); void ParseBracketDeclarator(Declarator &D); void ParseMisplacedBracketDeclarator(Declarator &D); //===--------------------------------------------------------------------===// // C++ 7: Declarations [dcl.dcl] /// The kind of attribute specifier we have found. enum CXX11AttributeKind { /// This is not an attribute specifier. CAK_NotAttributeSpecifier, /// This should be treated as an attribute-specifier. CAK_AttributeSpecifier, /// The next tokens are '[[', but this is not an attribute-specifier. This /// is ill-formed by C++11 [dcl.attr.grammar]p6. 
CAK_InvalidAttributeSpecifier }; CXX11AttributeKind isCXX11AttributeSpecifier(bool Disambiguate = false, bool OuterMightBeMessageSend = false); void DiagnoseUnexpectedNamespace(NamedDecl *Context); DeclGroupPtrTy ParseNamespace(DeclaratorContext Context, SourceLocation &DeclEnd, SourceLocation InlineLoc = SourceLocation()); struct InnerNamespaceInfo { SourceLocation NamespaceLoc; SourceLocation InlineLoc; SourceLocation IdentLoc; IdentifierInfo *Ident; }; using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>; void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs, unsigned int index, SourceLocation &InlineLoc, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker); Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context); Decl *ParseExportDeclaration(); DeclGroupPtrTy ParseUsingDirectiveOrDeclaration( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs); Decl *ParseUsingDirective(DeclaratorContext Context, SourceLocation UsingLoc, SourceLocation &DeclEnd, ParsedAttributes &attrs); struct UsingDeclarator { SourceLocation TypenameLoc; CXXScopeSpec SS; UnqualifiedId Name; SourceLocation EllipsisLoc; void clear() { TypenameLoc = EllipsisLoc = SourceLocation(); SS.clear(); Name.clear(); } }; bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D); DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, SourceLocation &DeclEnd, ParsedAttributesWithRange &Attrs, AccessSpecifier AS = AS_none); Decl *ParseAliasDeclarationAfterDeclarator( const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS, ParsedAttributes &Attrs, Decl **OwnedType = nullptr); Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd); Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo 
*Alias, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // C++ 9: classes [class] and C structs/unions. bool isValidAfterTypeSpecifier(bool CouldBeBitfield); void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, bool EnteringContext, DeclSpecContext DSC, ParsedAttributesWithRange &Attributes); void SkipCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, unsigned TagType, Decl *TagDecl); void ParseCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, ParsedAttributesWithRange &Attrs, unsigned TagType, Decl *TagDecl); ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction, SourceLocation &EqualLoc); bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo, VirtSpecifiers &VS, ExprResult &BitfieldSize, LateParsedAttrList &LateAttrs); void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D, VirtSpecifiers &VS); DeclGroupPtrTy ParseCXXClassMemberDeclaration( AccessSpecifier AS, ParsedAttributes &Attr, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), ParsingDeclRAIIObject *DiagsFromTParams = nullptr); DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas( AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs, DeclSpec::TST TagType, Decl *Tag); void ParseConstructorInitializer(Decl *ConstructorDecl); MemInitResult ParseMemInitializer(Decl *ConstructorDecl); void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo, Decl *ThisDecl); //===--------------------------------------------------------------------===// // C++ 10: Derived classes [class.derived] TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc, SourceLocation &EndLocation); void ParseBaseClause(Decl *ClassDecl); BaseResult ParseBaseSpecifier(Decl *ClassDecl); AccessSpecifier getAccessSpecifierIfPresent() const; bool 
ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHadErrors, SourceLocation TemplateKWLoc, IdentifierInfo *Name, SourceLocation NameLoc, bool EnteringContext, UnqualifiedId &Id, bool AssumeTemplateId); bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Result); //===--------------------------------------------------------------------===// // OpenMP: Directives and clauses. /// Parse clauses for '#pragma omp declare simd'. DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// Parse a property kind into \p TIProperty for the selector set \p Set and /// selector \p Selector. void parseOMPTraitPropertyKind(OMPTraitProperty &TIProperty, llvm::omp::TraitSet Set, llvm::omp::TraitSelector Selector, llvm::StringMap<SourceLocation> &Seen); /// Parse a selector kind into \p TISelector for the selector set \p Set. void parseOMPTraitSelectorKind(OMPTraitSelector &TISelector, llvm::omp::TraitSet Set, llvm::StringMap<SourceLocation> &Seen); /// Parse a selector set kind into \p TISet. void parseOMPTraitSetKind(OMPTraitSet &TISet, llvm::StringMap<SourceLocation> &Seen); /// Parses an OpenMP context property. void parseOMPContextProperty(OMPTraitSelector &TISelector, llvm::omp::TraitSet Set, llvm::StringMap<SourceLocation> &Seen); /// Parses an OpenMP context selector. void parseOMPContextSelector(OMPTraitSelector &TISelector, llvm::omp::TraitSet Set, llvm::StringMap<SourceLocation> &SeenSelectors); /// Parses an OpenMP context selector set. void parseOMPContextSelectorSet(OMPTraitSet &TISet, llvm::StringMap<SourceLocation> &SeenSets); /// Parses OpenMP context selectors. bool parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo &TI); /// Parse an 'append_args' clause for '#pragma omp declare variant'. 
bool parseOpenMPAppendArgs( SmallVectorImpl<OMPDeclareVariantAttr::InteropType> &InterOpTypes); /// Parse a `match` clause for an '#pragma omp declare variant'. Return true /// if there was an error. bool parseOMPDeclareVariantMatchClause(SourceLocation Loc, OMPTraitInfo &TI, OMPTraitInfo *ParentTI); /// Parse clauses for '#pragma omp declare variant'. void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// Parse 'omp [begin] assume[s]' directive. void ParseOpenMPAssumesDirective(OpenMPDirectiveKind DKind, SourceLocation Loc); /// Parse 'omp end assumes' directive. void ParseOpenMPEndAssumesDirective(SourceLocation Loc); /// Parse clauses for '#pragma omp [begin] declare target'. void ParseOMPDeclareTargetClauses(Sema::DeclareTargetContextInfo &DTCI); /// Parse '#pragma omp end declare target'. void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind BeginDKind, OpenMPDirectiveKind EndDKind, SourceLocation Loc); /// Skip tokens until a `annot_pragma_openmp_end` was found. Emit a warning if /// it is not the current token. void skipUntilPragmaOpenMPEnd(OpenMPDirectiveKind DKind); /// Check the \p FoundKind against the \p ExpectedKind, if not issue an error /// that the "end" matching the "begin" directive of kind \p BeginKind was not /// found. Finally, if the expected kind was found or if \p SkipUntilOpenMPEnd /// is set, skip ahead using the helper `skipUntilPragmaOpenMPEnd`. void parseOMPEndDirective(OpenMPDirectiveKind BeginKind, OpenMPDirectiveKind ExpectedKind, OpenMPDirectiveKind FoundKind, SourceLocation MatchingLoc, SourceLocation FoundLoc, bool SkipUntilOpenMPEnd); /// Parses declarative OpenMP directives. DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl( AccessSpecifier &AS, ParsedAttributesWithRange &Attrs, bool Delayed = false, DeclSpec::TST TagType = DeclSpec::TST_unspecified, Decl *TagDecl = nullptr); /// Parse 'omp declare reduction' construct. 
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS); /// Parses initializer for provided omp_priv declaration inside the reduction /// initializer. void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm); /// Parses 'omp declare mapper' directive. DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS); /// Parses variable declaration in 'omp declare mapper' directive. TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range, DeclarationName &Name, AccessSpecifier AS = AS_none); /// Tries to parse cast part of OpenMP array shaping operation: /// '[' expression ']' { '[' expression ']' } ')'. bool tryParseOpenMPArrayShapingCastPart(); /// Parses simple list of variables. /// /// \param Kind Kind of the directive. /// \param Callback Callback function to be called for the list elements. /// \param AllowScopeSpecifier true, if the variables can have fully /// qualified names. /// bool ParseOpenMPSimpleVarList( OpenMPDirectiveKind Kind, const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> & Callback, bool AllowScopeSpecifier); /// Parses declarative or executable directive. /// /// \param StmtCtx The context in which we're parsing the directive. StmtResult ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx); /// Parses clause of kind \a CKind for directive of a kind \a Kind. /// /// \param DKind Kind of current directive. /// \param CKind Kind of current clause. /// \param FirstClause true, if this is the first clause of a kind \a CKind /// in current directive. /// OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind, bool FirstClause); /// Parses clause with a single expression of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses simple clause of a kind \a Kind. 
/// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses indirect clause /// \param ParseOnly true to skip the clause's semantic actions and return // false; bool ParseOpenMPIndirectClause(Sema::DeclareTargetContextInfo &DTCI, bool ParseOnly); /// Parses clause with a single expression and an additional argument /// of a kind \a Kind. /// /// \param DKind Directive kind. /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, bool ParseOnly); /// Parses the 'sizes' clause of a '#pragma omp tile' directive. OMPClause *ParseOpenMPSizesClause(); /// Parses clause without any additional arguments. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false); /// Parses clause with the list of variables of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, bool ParseOnly); /// Parses and creates OpenMP 5.0 iterators expression: /// <iterators> = 'iterator' '(' { [ <iterator-type> ] identifier = /// <range-specification> }+ ')' ExprResult ParseOpenMPIteratorsExpr(); /// Parses allocators and traits in the context of the uses_allocator clause. /// Expected format: /// '(' { <allocator> [ '(' <allocator_traits> ')' ] }+ ')' OMPClause *ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind); /// Parses clause with an interop variable of kind \a Kind. /// /// \param Kind Kind of current clause. 
/// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. // OMPClause *ParseOpenMPInteropClause(OpenMPClauseKind Kind, bool ParseOnly); public: /// Parses simple expression in parens for single-expression clauses of OpenMP /// constructs. /// \param RLoc Returned location of right paren. ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc, bool IsAddressOfOperand = false); /// Data used for parsing list of variables in OpenMP clauses. struct OpenMPVarListDataTy { Expr *DepModOrTailExpr = nullptr; SourceLocation ColonLoc; SourceLocation RLoc; CXXScopeSpec ReductionOrMapperIdScopeSpec; DeclarationNameInfo ReductionOrMapperId; int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or ///< lastprivate clause. SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers> MapTypeModifiers; SmallVector<SourceLocation, NumberOfOMPMapClauseModifiers> MapTypeModifiersLoc; SmallVector<OpenMPMotionModifierKind, NumberOfOMPMotionModifiers> MotionModifiers; SmallVector<SourceLocation, NumberOfOMPMotionModifiers> MotionModifiersLoc; bool IsMapTypeImplicit = false; SourceLocation ExtraModifierLoc; }; /// Parses clauses with list. bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, SmallVectorImpl<Expr *> &Vars, OpenMPVarListDataTy &Data); bool ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHadErrors, bool EnteringContext, bool AllowDestructorName, bool AllowConstructorName, bool AllowDeductionGuide, SourceLocation *TemplateKWLoc, UnqualifiedId &Result); /// Parses the mapper modifier in map, to, and from clauses. bool parseMapperModifier(OpenMPVarListDataTy &Data); /// Parses map-type-modifiers in map clause. /// map([ [map-type-modifier[,] [map-type-modifier[,] ...] 
map-type : ] list) /// where, map-type-modifier ::= always | close | mapper(mapper-identifier) bool parseMapTypeModifiers(OpenMPVarListDataTy &Data); private: //===--------------------------------------------------------------------===// // C++ 14: Templates [temp] // C++ 14.1: Template Parameters [temp.param] Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS); Decl *ParseSingleDeclarationAfterTemplate( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); bool ParseTemplateParameters(MultiParseScope &TemplateScopes, unsigned Depth, SmallVectorImpl<NamedDecl *> &TemplateParams, SourceLocation &LAngleLoc, SourceLocation &RAngleLoc); bool ParseTemplateParameterList(unsigned Depth, SmallVectorImpl<NamedDecl*> &TemplateParams); TPResult isStartOfTemplateTypeParameter(); NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position); bool isTypeConstraintAnnotation(); bool TryAnnotateTypeConstraint(); void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc, SourceLocation CorrectLoc, bool AlreadyHasEllipsis, bool IdentifierHasName); void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc, Declarator &D); // C++ 14.3: Template arguments [temp.arg] typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList; bool ParseGreaterThanInTemplateList(SourceLocation LAngleLoc, SourceLocation &RAngleLoc, bool ConsumeLastToken, bool ObjCGenericList); 
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken, SourceLocation &LAngleLoc, TemplateArgList &TemplateArgs, SourceLocation &RAngleLoc, TemplateTy NameHint = nullptr); bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &TemplateName, bool AllowTypeAnnotation = true, bool TypeConstraint = false); void AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS, bool IsClassName = false); bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs, TemplateTy Template, SourceLocation OpenLoc); ParsedTemplateArgument ParseTemplateTemplateArgument(); ParsedTemplateArgument ParseTemplateArgument(); Decl *ParseExplicitInstantiation(DeclaratorContext Context, SourceLocation ExternLoc, SourceLocation TemplateLoc, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); // C++2a: Template, concept definition [temp] Decl * ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // Modules DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl); Decl *ParseModuleImport(SourceLocation AtLoc); bool parseMisplacedModuleImport(); bool tryParseMisplacedModuleImport() { tok::TokenKind Kind = Tok.getKind(); if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include) return parseMisplacedModuleImport(); return false; } bool ParseModuleName( SourceLocation UseLoc, SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path, bool IsImport); //===--------------------------------------------------------------------===// // C++11/G++: Type Traits [Type-Traits.html in the GCC manual] ExprResult ParseTypeTrait(); /// Parse the given string as a type. /// /// This is a dangerous utility function currently employed only by API notes. /// It is not a general entry-point for safely parsing types from strings. 
/// /// \param typeStr The string to be parsed as a type. /// \param context The name of the context in which this string is being /// parsed, which will be used in diagnostics. /// \param includeLoc The location at which this parse was triggered. TypeResult parseTypeFromString(StringRef typeStr, StringRef context, SourceLocation includeLoc); //===--------------------------------------------------------------------===// // Embarcadero: Arary and Expression Traits ExprResult ParseArrayTypeTrait(); ExprResult ParseExpressionTrait(); ExprResult ParseBuiltinPtrauthTypeDiscriminator(); //===--------------------------------------------------------------------===// // Preprocessor code-completion pass-through void CodeCompleteDirective(bool InConditional) override; void CodeCompleteInConditionalExclusion() override; void CodeCompleteMacroName(bool IsDefinition) override; void CodeCompletePreprocessorExpression() override; void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned ArgumentIndex) override; void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override; void CodeCompleteNaturalLanguage() override; class GNUAsmQualifiers { unsigned Qualifiers = AQ_unspecified; public: enum AQ { AQ_unspecified = 0, AQ_volatile = 1, AQ_inline = 2, AQ_goto = 4, }; static const char *getQualifierName(AQ Qualifier); bool setAsmQualifier(AQ Qualifier); inline bool isVolatile() const { return Qualifiers & AQ_volatile; }; inline bool isInline() const { return Qualifiers & AQ_inline; }; inline bool isGoto() const { return Qualifiers & AQ_goto; } }; bool isGCCAsmStatement(const Token &TokAfterAsm) const; bool isGNUAsmQualifier(const Token &TokAfterAsm) const; GNUAsmQualifiers::AQ getGNUAsmQualifier(const Token &Tok) const; bool parseGNUAsmQualifierListOpt(GNUAsmQualifiers &AQ); }; } // end namespace clang #endif
Par-14-NestedFollowingNestedPar.c
/*
 * Test case: a serial loop followed by nested "#pragma omp parallel for"
 * regions (parallel loops inside parallel loops, three levels deep).
 */
int main(int argc, char **argv) {
  int a[4] = {1,2,3,4};
  int b[4] = {1,1,1,1};
  int c[4] = {0,2,1,3};
  /* NOTE(review): this loop returns -1 on its very first iteration
     (i = 0 < 2), so everything below it is unreachable. Presumably
     deliberate for whatever analysis tool consumes this test case —
     confirm before "fixing". */
  for (int i = 0; i < 1; ++i) {
    if (i < 2) {
      return -1;
    }
  }
  /* Outer parallel loop over i, with nested parallel loops inside each
     iteration. NOTE(review): were this code reachable, b[] is written
     concurrently by different outer iterations (b[j] += a[i]) and read by
     c[k] = a[i] * b[k] + c[k], so the result would be nondeterministic —
     likely the very pattern this test exists to exercise. */
  #pragma omp parallel for
  for (int i = 0; i < 4; ++i) {
    a[i] = 3*a[i];
    #pragma omp parallel for
    for(int j = 0; j < 4; ++j) {
      b[j] += a[i];
    }
    #pragma omp parallel for
    for(int k = 0; k < 4; ++k) {
      c[k] = a[i] * b[k] + c[k];
      #pragma omp parallel for
      for(int j = 0; j < 4; ++j) {
        b[j] += a[i];
      }
    }
  }
  return 0;
}
validation.c
/*
 * Validates a computed result against a hard-coded reference value.
 *
 * Recomputes expected = sum_{i,j} B[i][j] * rand1 * rand2 over the N x N
 * global matrix B and compares it against the reference `actual` with an
 * absolute tolerance of 1e-8.
 *
 * Returns 1 when |expected - actual| < 1e-8, 0 otherwise.
 *
 * NOTE(review): depends on file-scope N and B declared elsewhere.
 */
int isValid() {
  /* NOTE(review): an orphaned barrier outside any parallel region binds to a
     team of one and is a no-op; presumably intended for the case where this
     is called from inside a parallel region — confirm against callers. */
  #pragma omp barrier
  double actual = 27885326029.756424;
  double s_sum = 0.0;               /* unused q_sum removed */
  double rand1 = 0.1, rand2 = 0.9;
  double expected = 0.0;
  int i, j;
  double diff = 0.0;

  for (i = 0; i <= N - 1; i++)
    for (j = 0; j <= N - 1; j++)
      s_sum += B[i][j] * rand1 * rand2;
  expected = s_sum;

  /* BUG FIX: this previously used abs(), the *integer* absolute value.
     For a double difference this large it truncates (and overflows int),
     making the 1e-8 tolerance check meaningless. Compute the absolute
     difference in double arithmetic instead (no <math.h> fabs needed). */
  diff = (expected > actual) ? (expected - actual) : (actual - expected);

  //printf("expected=%f\n",expected);
  //printf("actual=%f\n",actual);
  //printf("diff=%f\n",diff);
  //printf("diff=%d\n",(diff < 0.00000001));
  if (diff < 0.00000001)
    return 1;
  else
    return 0;
}
SE3P_Stokes_direct.c
/*
 * Direct (brute-force) evaluation of the triply periodic (3P) stokeslet
 * Ewald sum: real-space part (with/without cutoff, at particle or external
 * points), k-space (Fourier) part, and self term. Per-pair kernels are
 * delegated to op_A / op_B / self_coeff from the decomposition header
 * selected below (their exact formulas are not visible here).
 *
 * Data layout (visible from the indexing): coordinates and forces are stored
 * component-planar, i.e. x[n], x[n+N], x[n+2*N] are the x/y/z components of
 * particle n; results u use the same layout with stride nidx (or Nt).
 */
#include <math.h>
#include "SE_Stokes_direct.h"

/* Exactly one Ewald decomposition must be selected at compile time.
   NOTE(review): "#elif BEENAKKER" tests the macro's *value*, so it only works
   when the macro is defined to a nonzero value (plain -DBEENAKKER yields 1);
   "#elif defined(BEENAKKER)" would be more robust — confirm build flags. */
#ifdef HASIMOTO
#include "hasimoto_decomp.h"
#elif BEENAKKER
#include "beenakker_decomp.h"
#else
#error "Must provide -D<decomposition> to compiler"
#endif

#ifdef _OPENMP
#include <omp.h>
#endif

/* Real-space Ewald sum, no cutoff: for each evaluation point idx[m], sum
 * A(r)*f over all N particles in all (2*opt.layers+1)^3 image boxes,
 * skipping only the self interaction in the home box. Overwrites u. */
void SE3P_Stokes_direct_real(double* restrict u,
                             const int* restrict idx, int nidx,
                             const double* restrict x,
                             const double* restrict f,
                             int N, const ewald_opts opt)
{
    const int nbox = opt.layers;
    double r[3];
    double xm[3];
    double A[3][3];
    int i1, i2, i3, m, n;

    for(m=0; m<nidx; m++) // for all evaluation points
    {
        u[m       ] = 0;
        u[m+nidx  ] = 0;
        u[m+2*nidx] = 0;

        xm[0] = x[idx[m]    ]; // indirect indexing OK in outer loop
        xm[1] = x[idx[m]+N  ];
        xm[2] = x[idx[m]+2*N];

        for(i1 = -nbox; i1<=nbox; i1++) // image boxes
        for(i2 = -nbox; i2<=nbox; i2++)
        for(i3 = -nbox; i3<=nbox; i3++)
        {
            for(n=0; n<N; n++) // for all particles
            {
                if(i1==0 && i2==0 && i3==0 && n==idx[m]) // skip self
                    continue;

                // r = (evaluation point) - (periodic image of particle n)
                r[0] = xm[0]-x[n    ]+opt.box[0]*i1;
                r[1] = xm[1]-x[n+  N]+opt.box[1]*i2;
                r[2] = xm[2]-x[n+2*N]+opt.box[2]*i3;

                op_A(A,r,opt.xi); // real-space kernel (decomposition header)

                // u += A*f
                u[m       ] += A[0][0]*f[n]+A[0][1]*f[n+N]+A[0][2]*f[n+2*N];
                u[m+nidx  ] += A[1][0]*f[n]+A[1][1]*f[n+N]+A[1][2]*f[n+2*N];
                u[m+2*nidx] += A[2][0]*f[n]+A[2][1]*f[n+N]+A[2][2]*f[n+2*N];
            }
        }
    }
}

/* Same as SE3P_Stokes_direct_real, but pairs farther apart than opt.rc are
 * skipped (spherical truncation of the real-space sum). */
void SE3P_Stokes_direct_real_rc(double* restrict u,
                                const int* restrict idx, int nidx,
                                const double* restrict x,
                                const double* restrict f,
                                int N, const ewald_opts opt)
{
    const int nbox = opt.layers;
    double r[3];
    double xm[3];
    double A[3][3];
    int i1, i2, i3, m, n;

    for(m=0; m<nidx; m++) // for all evaluation points
    {
        u[m       ] = 0;
        u[m+nidx  ] = 0;
        u[m+2*nidx] = 0;

        xm[0] = x[idx[m]    ]; // indirect indexing OK in outer loop
        xm[1] = x[idx[m]+N  ];
        xm[2] = x[idx[m]+2*N];

        for(i1 = -nbox; i1<=nbox; i1++) // image boxes
        for(i2 = -nbox; i2<=nbox; i2++)
        for(i3 = -nbox; i3<=nbox; i3++)
        {
            for(n=0; n<N; n++) // for all particles
            {
                if(i1==0 && i2==0 && i3==0 && n==idx[m]) // skip self
                    continue;

                r[0] = xm[0]-x[n    ]+opt.box[0]*i1;
                r[1] = xm[1]-x[n+  N]+opt.box[1]*i2;
                r[2] = xm[2]-x[n+2*N]+opt.box[2]*i3;

                if(sqrt(r[0]*r[0] + r[1]*r[1] + r[2]*r[2]) > opt.rc)
                    continue; // skip outside rc

                op_A(A,r,opt.xi);

                // u += A*f
                u[m       ] += A[0][0]*f[n]+A[0][1]*f[n+N]+A[0][2]*f[n+2*N];
                u[m+nidx  ] += A[1][0]*f[n]+A[1][1]*f[n+N]+A[1][2]*f[n+2*N];
                u[m+2*nidx] += A[2][0]*f[n]+A[2][1]*f[n+N]+A[2][2]*f[n+2*N];
            }
        }
    }
}

/* Truncated real-space sum evaluated at Nt *external* points xt (distinct
 * from particle positions, hence no self-interaction test). Parallelized
 * over evaluation points with OpenMP; each point accumulates into a private
 * um[3] and writes u once at the end, so iterations are independent. */
void SE3P_Stokes_direct_real_ext_rc(double* restrict u,
                                    const double* restrict xt,
                                    const int Nt,
                                    const double* restrict x,
                                    const double* restrict f,
                                    const int N,
                                    const ewald_opts opt)
{
    const int nbox = opt.layers;
    double r[3];
    double xm[3];
    double A[3][3];
    int i1, i2, i3, m, n;

    /* NOTE(review): with default(none), the const-qualified Nt, N, nbox and
       opt rely on the OpenMP <= 4.5 rule that const variables are
       *predetermined* shared. OpenMP 5.0 removed that rule, so newer
       compilers (e.g. gcc >= 9) reject this pragma unless those names are
       added to shared(...) — confirm the target toolchain. */
#ifdef _OPENMP
#pragma omp parallel for \
    private(r,xm,A,i1,i2,i3,m,n) \
    shared(u,xt,x,f) \
    default(none)
#endif
    for(m=0; m<Nt; m++) // for all evaluation points
    {
        double um[3] = {0.0, 0.0, 0.0}; // per-point (thread-local) accumulator

        xm[0] = xt[m     ];
        xm[1] = xt[m+Nt  ];
        xm[2] = xt[m+2*Nt];

        for(i1 = -nbox; i1<=nbox; i1++) // image boxes
        for(i2 = -nbox; i2<=nbox; i2++)
        for(i3 = -nbox; i3<=nbox; i3++)
        {
            for(n=0; n<N; n++) // for all particles
            {
                // Assuming that r != 0 in home box
                r[0] = xm[0]-x[n    ]+opt.box[0]*i1;
                r[1] = xm[1]-x[n+  N]+opt.box[1]*i2;
                r[2] = xm[2]-x[n+2*N]+opt.box[2]*i3;

                if(sqrt(r[0]*r[0] + r[1]*r[1] + r[2]*r[2]) > opt.rc)
                    continue; // skip outside rc

                op_A(A,r,opt.xi);

                // u += A*f
                um[0] += A[0][0]*f[n]+A[0][1]*f[n+N]+A[0][2]*f[n+2*N];
                um[1] += A[1][0]*f[n]+A[1][1]*f[n+N]+A[1][2]*f[n+2*N];
                um[2] += A[2][0]*f[n]+A[2][1]*f[n+N]+A[2][2]*f[n+2*N];
            }
        }

        u[m     ] = um[0];
        u[m+Nt  ] = um[1];
        u[m+2*Nt] = um[2];
    }
}

/* k-space (Fourier) part of the Ewald sum: for each evaluation point, loop
 * over the (2*opt.layers+1)^3 cube of wave vectors (k = 0 excluded), build
 * the cosine-weighted structure factor z, apply the k-space kernel B(k), and
 * finally normalize by the box volume. Overwrites u. */
void SE3P_Stokes_direct_fd(double* restrict u,
                           const int* restrict idx, int nidx,
                           const double* restrict x,
                           const double* restrict f,
                           int N, const ewald_opts opt)
{
    double B[3][3];
    double z[3];
    double k[3];
    double xm[3];
    int i1, i2, i3, m, n;
    double q, k_dot_r;
    const double vol = opt.box[0]*opt.box[1]*opt.box[2];
    const int kmax=opt.layers;
    // fundamental wave numbers 2*pi/L for each box dimension
    const double kc0 = 2.0*PI/opt.box[0];
    const double kc1 = 2.0*PI/opt.box[1];
    const double kc2 = 2.0*PI/opt.box[2];

    for(m=0; m<nidx; m++) // for all evaluation points
    {
        u[m       ] = 0;
        u[m+ nidx ] = 0;
        u[m+2*nidx] = 0;

        xm[0] = x[idx[m]    ]; // indirect indexing OK in outer loop
        xm[1] = x[idx[m]+N  ];
        xm[2] = x[idx[m]+2*N];

        for(i1 = -kmax; i1<=kmax; i1++) // for k-space cube
        for(i2 = -kmax; i2<=kmax; i2++)
        for(i3 = -kmax; i3<=kmax; i3++)
        {
            if(i3 != 0 || i2 != 0 || i1 != 0) // exclude k=0
            {
                z[0] = 0; z[1] = 0; z[2] = 0;
                k[0] = kc0*i1;
                k[1] = kc1*i2;
                k[2] = kc2*i3;

                for(n=0; n<N; n++) // for all particles
                {
                    k_dot_r = k[0]*(xm[0]-x[n    ])+
                              k[1]*(xm[1]-x[n+N  ])+
                              k[2]*(xm[2]-x[n+2*N]);
                    q = cos(k_dot_r);
                    z[0] += q*f[n    ];
                    z[1] += q*f[n+  N];
                    z[2] += q*f[n+2*N];
                }

                op_B(B,k,opt.xi); // k-space kernel (decomposition header)

                // multiplication
                u[m       ] += B[0][0]*z[0]+B[0][1]*z[1]+B[0][2]*z[2];
                u[m+nidx  ] += B[1][0]*z[0]+B[1][1]*z[1]+B[1][2]*z[2];
                u[m+2*nidx] += B[2][0]*z[0]+B[2][1]*z[1]+B[2][2]*z[2];
            }
        }

        u[m       ] /= vol;
        u[m+ nidx ] /= vol;
        u[m+2*nidx] /= vol;
    }
}

/* Self-interaction term of the Ewald sum: u = self_coeff(xi) * f at each
 * selected particle. Overwrites u. */
void SE3P_Stokes_direct_self(double* restrict u,
                             const int* restrict idx, int nidx,
                             const double* restrict f,
                             int N, const ewald_opts opt)
{
    double c = self_coeff(opt.xi);
    for(int m=0; m<nidx; m++)
    {
        u[m       ] = c*f[idx[m]    ];
        u[m+ nidx ] = c*f[idx[m]+N  ];
        u[m+2*nidx] = c*f[idx[m]+2*N];
    }
}
safeomp.h
// tl;dr: You may use the definitions in this header file as you please,
// with or without attribution. No warranty is provided. This "license" applies
// ONLY to this particular file, and not the larger project in which it is
// contained. If you want me to license it differently for you for some reason,
// email me.

// This file is free and unencumbered software released into the public domain.
//
// Anyone is free to copy, modify, publish, use, compile, sell, or distribute
// this file, for any purpose, and by any means.
//
// In jurisdictions that recognize copyright laws, the author or authors of this
// file dedicate any and all copyright interest in the file to the public
// domain. We make this dedication for the benefit of the public at large and to
// the detriment of our heirs and successors. We intend this dedication to be an
// overt act of relinquishment in perpetuity of all present and future rights to
// this file under copyright law.
//
// THE FILE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
// ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THIS FILE OR THE USE OR OTHER DEALINGS IN THIS FILE.
//
// For more information, please refer to http://unlicense.org/

#ifndef __FASTMAP_LIB_OMPUTILS_H__
#define __FASTMAP_LIB_OMPUTILS_H__

// -----------------------------------------------------------------------------
// Non-OMP vectorization stuff
// -----------------------------------------------------------------------------

// Praying to the compiler to please ignore what it thinks are loop dependencies
// and to please just go ahead and vectorize the loop already. Useful for
// loops outside of omp parallel blocks.
// GCC HAS TO GO LAST since so many compilers support --std=gnu99 and so on
#if defined(__INTEL_COMPILER)
#define PLEASE_VECTORIZE _Pragma("ivdep")
#elif defined(__clang__)
#define PLEASE_VECTORIZE _Pragma("clang loop vectorize(enable) interleave(enable)")
#elif defined(__GNUC__)
// "GCC ivdep" only exists from gcc 5 (well, > 4.9)
#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 9)
#define PLEASE_VECTORIZE _Pragma("GCC ivdep")
#else
#define PLEASE_VECTORIZE
#endif
#else
#define PLEASE_VECTORIZE
#endif

// -----------------------------------------------------------------------------
// OMP stuff
// -----------------------------------------------------------------------------

// Problem-size threshold used by callers to decide whether to parallelize.
#define OMP_MIN_SIZE 1000

// (CLEANUP: the two separate "#ifdef _OPENMP / #include <omp.h>" blocks have
// been merged into one.)
#ifdef _OPENMP
#include <omp.h>
// Feature macros derived from the OpenMP version date:
// 201307 == OpenMP 4.0, 200805 == OpenMP 3.0.
#if _OPENMP >= 201307
#define OMP_VER_4
#elif _OPENMP >= 200805
#define OMP_VER_3
#endif
// Number of threads a parallel region would use. Every thread in the probe
// region stores the same value into ret, so the unsynchronized writes are
// benign in practice.
static inline int nthreads()
{
  int ret;
  #pragma omp parallel
  ret = omp_get_num_threads();
  return ret;
}
#else
// Serial fallbacks so callers can use these names unconditionally.
#define omp_get_thread_num() 0
static inline int nthreads() { return 1; }
#endif

// Insert SIMD pragma if supported. This pragma _demands_ vectorization, whether
// or not the compiler thinks that's a good idea. However, it will only work
// inside of an omp parallel block.
#ifdef OMP_VER_4
#ifdef _MSC_VER
// MSVC does not implement C99 _Pragma("..."); its equivalent extension is
// __pragma with an unquoted token sequence.
// (BUG FIX: previously spelled as the nonexistent lowercase `_pragma`, which
// would not have compiled had this branch ever been taken.)
#define SAFE_SIMD __pragma(omp simd)
#define SAFE_FOR_SIMD __pragma(omp for simd)
#define SAFE_PARALLEL_FOR_SIMD __pragma(omp parallel for simd)
#else
#define SAFE_SIMD _Pragma("omp simd")
#define SAFE_FOR_SIMD _Pragma("omp for simd")
#define SAFE_PARALLEL_FOR_SIMD _Pragma("omp parallel for simd")
#endif
#else
// Without OpenMP 4 simd support, fall back to a plain vectorization hint.
// (CONSISTENCY FIX: SAFE_PARALLEL_FOR_SIMD previously expanded to nothing
// ("//TODO"); a serial-but-vectorized loop is the closest available behavior
// and matches the other two fallbacks.)
#define SAFE_SIMD PLEASE_VECTORIZE
#define SAFE_FOR_SIMD PLEASE_VECTORIZE
#define SAFE_PARALLEL_FOR_SIMD PLEASE_VECTORIZE
#endif

#endif // __FASTMAP_LIB_OMPUTILS_H__
parallel.h
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <algorithm>
#include <cstdint>     // int64_t (BUG FIX: previously relied on transitive includes)
#include <functional>  // std::function, used by ThreadHandler (BUG FIX: was missing)

#ifdef PADDLE_WITH_MKLML
#include <omp.h>
#include "lite/backends/x86/mklml.h"
#endif

namespace paddle {
namespace lite {
namespace x86 {

// Sets the thread count used by MKL and OpenMP (clamped to >= 1).
// A no-op when built without MKLML support.
static inline void SetNumThreads(int num_threads) {
#ifdef PADDLE_WITH_MKLML
  int real_num_threads = std::max(num_threads, 1);
  x86::MKL_Set_Num_Threads(real_num_threads);
  omp_set_num_threads(real_num_threads);
#else
  (void)num_threads;  // silence -Wunused-parameter in non-MKLML builds
#endif
}

// Returns the maximum number of threads a parallel region may use:
// 1 without MKLML or when already inside an omp parallel region
// (nested omp parallelism is not supported), else omp_get_max_threads().
static inline int64_t GetMaxThreads() {
  int64_t num_threads = 1;
#ifdef PADDLE_WITH_MKLML
  // Do not support nested omp parallelism.
  num_threads = omp_in_parallel() ? 1 : omp_get_max_threads();
#endif
  // BUG FIX: use an int64_t-typed literal. std::max(num_threads, 1L) fails to
  // compile on LLP64 platforms (e.g. 64-bit Windows) where long != int64_t.
  return std::max(num_threads, static_cast<int64_t>(1));
}

using ThreadHandler =
    std::function<void(const int64_t begin, const int64_t end)>;

// Splits [begin, end) into contiguous chunks and invokes `f` on each chunk,
// in parallel when MKLML/OpenMP is available and profitable, serially
// otherwise. `f` may receive an empty range (begin == end) on trailing
// threads when the work does not divide evenly.
static inline void RunParallelFor(const int64_t begin, const int64_t end,
                                  const ThreadHandler& f) {
  if (begin >= end) {
    return;
  }

#ifdef PADDLE_WITH_MKLML
  int64_t num_threads = std::min(GetMaxThreads(), end - begin);
  if (num_threads > 1) {
#pragma omp parallel num_threads(num_threads)
    {
      int64_t tid = omp_get_thread_num();
      int64_t chunk_size = (end - begin + num_threads - 1) / num_threads;
      // ROBUSTNESS FIX: clamp the chunk start so trailing threads receive an
      // empty [end, end) range instead of an inverted one (begin_tid > end
      // was possible when tid * chunk_size overshot the total work).
      int64_t begin_tid = std::min(end, begin + tid * chunk_size);
      f(begin_tid, std::min(end, chunk_size + begin_tid));
    }
    return;
  }
#endif
  f(begin, end);
}

}  // namespace x86
}  // namespace lite
}  // namespace paddle
GB_binop__eq_fc32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__eq_fc32) // A.*B function (eWiseMult): GB (_AemultB_08__eq_fc32) // A.*B function (eWiseMult): GB (_AemultB_02__eq_fc32) // A.*B function (eWiseMult): GB (_AemultB_04__eq_fc32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_fc32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__eq_fc32) // C+=b function (dense accum): GB (_Cdense_accumb__eq_fc32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_fc32) // C=scalar+B GB (_bind1st__eq_fc32) // C=scalar+B' GB (_bind1st_tran__eq_fc32) // C=A+scalar GB (_bind2nd__eq_fc32) // C=A'+scalar GB (_bind2nd_tran__eq_fc32) // C type: bool // A type: GxB_FC32_t // A pattern? 0 // B type: GxB_FC32_t // B pattern? 
0

// BinaryOp: cij = GB_FC32_eq (aij, bij)

// type of the A operand
#define GB_ATYPE \
    GxB_FC32_t

// type of the B operand
#define GB_BTYPE \
    GxB_FC32_t

// type of the C result
#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    GxB_FC32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
// NOTE(review): the trailing backslash after the "0" below makes the
// continuation swallow the following blank line; harmless (the macro still
// expands to 0), but a generator artifact.
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    GxB_FC32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
// NOTE(review): same stray trailing backslash as GB_A_IS_PATTERN above.
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = (crealf (GBX (Ax, pA, A_iso)) != 0) || (cimagf (GBX (Ax, pA, A_iso)) != 0)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = (crealf (GBX (Bx, pB, B_iso)) != 0) || (cimagf (GBX (Bx, pB, B_iso)) != 0)

// C entry accessor
#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_FC32_eq (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_EQ || GxB_NO_FC32 || GxB_NO_EQ_FC32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__eq_fc32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__eq_fc32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__eq_fc32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type GxB_FC32_t GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; 
#else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__eq_fc32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; GxB_FC32_t alpha_scalar ; GxB_FC32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((GxB_FC32_t *) alpha_scalar_in)) ; beta_scalar = (*((GxB_FC32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__eq_fc32) ( GrB_Matrix C, const int 
C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__eq_fc32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__eq_fc32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__eq_fc32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__eq_fc32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ; GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) 
for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; GxB_FC32_t bij = GBX (Bx, p, false) ; Cx [p] = GB_FC32_eq (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__eq_fc32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ; GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC32_t aij = GBX (Ax, p, false) ; Cx [p] = GB_FC32_eq (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC32_eq (x, aij) ; \ } GrB_Info GB (_bind1st_tran__eq_fc32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC32_eq (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__eq_fc32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
DRB024-simdtruedep-orig-yes.c
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.

Produced at the Lawrence Livermore National Laboratory

Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)

LLNL-CODE-732144
All rights reserved.

This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.

Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:

* Redistributions of source code must retain the above copyright
  notice, this list of conditions and the disclaimer below.

* Redistributions in binary form must reproduce the above copyright
  notice, this list of conditions and the disclaimer (as noted below)
  in the documentation and/or other materials provided with the
  distribution.

* Neither the name of the LLNS/LLNL nor the names of its contributors
  may be used to endorse or promote products derived from this
  software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE
LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

/*
This one has data races due to true dependence.
But data races happen at instruction level, not thread level.
Data race pair: a[i+1]@66:5 vs. a[i]@66:12

NOTE(review): this is a DataRaceBench benchmark (DRB024-simdtruedep-orig-yes);
its behavior — including the intended race — is the test fixture, so the code
below is deliberately left untouched.
*/
#include <stdio.h>

/* Benchmark driver: initializes a[] and b[] in parallel, then runs a loop
   carrying a true dependence (a[i+1] reads a[i] written by the previous
   iteration), and prints the result. */
int main(int argc, char* argv[])
{
  int i;
  int len=100;
  int a[100], b[100];

  /* Initialization loop: each iteration writes only a[i] and b[i], so the
     iterations are independent and this parallel loop is race-free. */
#pragma omp parallel for
  for (i=0;i<len;i++)
  {
    a[i]=i;
    b[i]=i+1;
  }

  /* True-dependence loop referenced by the race-pair comment above.
     NOTE(review): upstream DRB024 applies `#pragma omp simd` to this loop,
     which is what creates the instruction-level (vectorization) race; as
     written here, with no pragma, the loop is sequential and the documented
     race pair cannot manifest — confirm against the upstream benchmark. */
  for (i=0;i<len-1;i++)
    a[i+1]=a[i]+b[i];

  for (i=0;i<len;i++)
    printf("i=%d a[%d]=%d\n",i,i,a[i]);

  return 0;
}
GB_unaryop__minv_fp64_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

// This translation unit instantiates the MINV (multiplicative inverse)
// unary operator for uint32_t input and double output:
//    cij = 1. / ((double) aij)
// Note: an input of 0 yields +Inf under IEEE-754 double division — there is
// no integer divide-by-zero here because the cast to double happens first.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_fp64_uint32
// op(A') function:  GB_tran__minv_fp64_uint32

// C type:   double
// A type:   uint32_t
// cast:     double cij = (double) aij
// unaryop:  cij = 1./aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = 1./x ;

// casting
#define GB_CASTING(z, x) \
    double z = (double) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_FP64 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise over a dense value array: Cx [p] = 1./(double) Ax [p] for all
// p in [0, anz).  Iterations are independent, hence the parallel-for.
GrB_Info GB_unop__minv_fp64_uint32
(
    double *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body lives in GB_unaryop_transpose.c and is specialized
// through the macros defined above (GB_CAST_OP et al.).
GrB_Info GB_tran__minv_fp64_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
zpbsv.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> s d c
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 * @ingroup plasma_pbsv
 *
 *  Computes the solution to a system of linear equations A * X = B,
 *  where A is an n-by-n Hermitian positive definite band matrix, and X and B
 *  are n-by-nrhs matrices. The Cholesky decomposition is used to factor A as
 *
 *    \f[ A = L\times L^H, \f] if uplo = PlasmaLower,
 *  or
 *    \f[ A = U^H\times U, \f] if uplo = PlasmaUpper,
 *
 *  where U is an upper triangular matrix and L is a lower triangular matrix.
 *
 *******************************************************************************
 *
 * @param[in] uplo
 *          - PlasmaUpper: Upper triangle of A is stored;
 *          - PlasmaLower: Lower triangle of A is stored.
 *
 * @param[in] n
 *          The number of linear equations, i.e., the order of the matrix A.
 *          n >= 0.
 *
 * @param[in] kd
 *          The number of superdiagonals within the band of A if
 *          uplo = PlasmaUpper, or the number of subdiagonals if
 *          uplo = PlasmaLower. kd >= 0.
 *
 * @param[in] nrhs
 *          The number of right hand sides, i.e., the number of columns
 *          of the matrix B. nrhs >= 0.
 *
 * @param[in,out] AB
 *          On entry, the upper or lower triangle of the Hermitian band
 *          matrix A, stored in the first KD+1 rows of the array. The
 *          j-th column of A is stored in the j-th column of the array AB
 *          as follows:
 *          if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd) <= i <= j;
 *          if UPLO = 'L', AB(1+i-j,j)    = A(i,j) for j <= i <= min(n,j+kd).
 *          \n
 *          On exit, if INFO = 0, the triangular factor U or L from the
 *          Cholesky factorization A = U^H*U or A = L*L^H of the band
 *          matrix A, in the same storage format as A.
 *
 * @param[in] ldab
 *          The leading dimension of the array AB. ldab >= kd+1.
 *
 * @param[in,out] B
 *          On entry, the n-by-nrhs right hand side matrix B.
 *          On exit, if return value = 0, the n-by-nrhs solution matrix X.
 *
 * @param[in] ldb
 *          The leading dimension of the array B. ldb >= max(1,n).
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 * @retval > 0 if i, the leading minor of order i of A is not
 *         positive definite, so the factorization could not
 *         be completed, and the solution has not been computed.
 *
 *******************************************************************************
 *
 * @sa plasma_omp_zpbsv
 * @sa plasma_cpbsv
 * @sa plasma_dpbsv
 * @sa plasma_spbsv
 *
 ******************************************************************************/
int plasma_zpbsv(plasma_enum_t uplo,
                 int n, int kd, int nrhs,
                 plasma_complex64_t *pAB, int ldab,
                 plasma_complex64_t *pB,  int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (kd < 0) {
        plasma_error("illegal value of kd");
        return -3;
    }
    if (nrhs < 0) {
        plasma_error("illegal value of nrhs");
        return -4;
    }
    // band storage needs one row per diagonal of the band: kd+1 rows
    if (ldab < kd+1) {
        plasma_error("illegal value of ldab");
        return -6;
    }
    if (ldb < imax(1, n)) {
        plasma_error("illegal value of ldb");
        return -8;
    }

    // quick return
    if (imin(n, nrhs) == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_pbtrf(plasma, PlasmaComplexDouble, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Initialize tile matrix descriptors.
    // lm: rows of the band descriptor, rounded up to whole nb-tiles.
    int lm = nb*(1+(kd+nb-1)/nb);
    plasma_desc_t AB;
    plasma_desc_t B;
    int retval;
    retval = plasma_desc_general_band_create(PlasmaComplexDouble, uplo, nb, nb,
                                             lm, n, 0, 0, n, n, kd, kd, &AB);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_band_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        ldb, nrhs, 0, 0, n, nrhs, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&AB);
        return retval;
    }

    // Initialize sequence (check the result instead of discarding it).
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        plasma_desc_destroy(&AB);
        plasma_desc_destroy(&B);
        return retval;
    }

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        plasma_desc_destroy(&AB);
        plasma_desc_destroy(&B);
        return retval;
    }

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zpb2desc(pAB, ldab, AB, &sequence, &request);
        plasma_omp_zge2desc(pB,  ldb,  B,  &sequence, &request);

        // Call the tile async function.
        plasma_omp_zpbsv(uplo, AB, B, &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_zdesc2pb(AB, pAB, ldab, &sequence, &request);
        plasma_omp_zdesc2ge(B,  pB,  ldb,  &sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&AB);
    plasma_desc_destroy(&B);

    // Return status.
    int status = sequence.status;
    return status;
}

/***************************************************************************//**
 *
 * @ingroup plasma_pbsv
 *
 *  Solves a Hermitian positive definite band system of linear equations
 *  using Cholesky factorization.
 *  Non-blocking tile version of plasma_zpbsv().
 *  Operates on matrices stored by tiles.
 *  All matrices are passed through descriptors.
 *  All dimensions are taken from the descriptors.
 *  Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] uplo
 *          - PlasmaUpper: Upper triangle of A is stored;
 *          - PlasmaLower: Lower triangle of A is stored.
 *
 * @param[in,out] AB
 *          Descriptor of matrix A.
 *
 * @param[in,out] B
 *          Descriptor of right-hand-sides B.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes). Check
 *          the sequence->status for errors.
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values. The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_zpbsv
 * @sa plasma_omp_cpbsv
 * @sa plasma_omp_dpbsv
 * @sa plasma_omp_spbsv
 *
 ******************************************************************************/
void plasma_omp_zpbsv(plasma_enum_t uplo,
                      plasma_desc_t AB,
                      plasma_desc_t B,
                      plasma_sequence_t *sequence,
                      plasma_request_t *request)
{
    // Check sequence and request first: every later error path reports
    // through plasma_request_fail(), which dereferences both pointers,
    // so they must be validated before any other use.
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }

    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        // report the failure through the sequence/request, like every
        // other argument check in this function
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(AB) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (AB.n == 0 || B.n == 0)
        return;

    // Call the parallel functions: Cholesky factorization, then the two
    // triangular band solves (U^H then U for upper; L then L^H for lower).
    plasma_pzpbtrf(uplo, AB, sequence, request);

    plasma_pztbsm(PlasmaLeft, uplo,
                  uplo == PlasmaUpper ? PlasmaConjTrans : PlasmaNoTrans,
                  PlasmaNonUnit,
                  1.0, AB,
                       B,
                  NULL,
                  sequence, request);

    plasma_pztbsm(PlasmaLeft, uplo,
                  uplo == PlasmaUpper ? PlasmaNoTrans : PlasmaConjTrans,
                  PlasmaNonUnit,
                  1.0, AB,
                       B,
                  NULL,
                  sequence, request);
}
denseraster.h
#pragma once #include "gdx/cell.h" #include "gdx/cpupredicates-private.h" #include "gdx/exception.h" #include "gdx/nodatapredicates-private.h" #include "gdx/rasterchecks.h" #include "gdx/rasteriterator.h" #include "gdx/rastermetadata.h" #include "gdx/simd.h" #include "infra/cast.h" #include "infra/span.h" #include "infra/string.h" #include "rasterutils-private.h" #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable : 4244 4242 4127 4005) #endif #include <Vc/Allocator> #include <Vc/common/simdize.h> #ifdef _MSC_VER #pragma warning(pop) #endif #include <algorithm> #include <cassert> #include <type_traits> #include <vector> namespace gdx { template <typename T> class DenseRaster { public: using value_type = T; using size_type = std::size_t; using data_type = std::vector<T, Vc::Allocator<T>>; using nodata_type = std::optional<value_type>; using pointer = T*; using const_pointer = const T*; using iterator = pointer; using const_iterator = const_pointer; static constexpr bool raster_type_has_nan = std::numeric_limits<T>::has_quiet_NaN; static constexpr bool with_nodata = true; static constexpr T NaN = std::numeric_limits<T>::quiet_NaN(); static constexpr bool has_nan() { return raster_type_has_nan; } static constexpr bool simd_supported() { return !(std::is_same_v<uint8_t, T> || std::is_same_v<int64_t, T> || std::is_same_v<uint64_t, T>); } DenseRaster() = default; DenseRaster(int32_t rows, int32_t cols) : _meta(rows, cols) , _data(rows * cols) { } explicit DenseRaster(RasterMetadata meta) : _meta(std::move(meta)) , _data(_meta.rows * _meta.cols) { init_nodata_values(); } DenseRaster(int32_t rows, int32_t cols, T fillValue) : DenseRaster(RasterMetadata(rows, cols), fillValue) { } DenseRaster(const RasterMetadata& meta, T fillValue) : _meta(meta) , _data(meta.rows * meta.cols) { if constexpr (raster_type_has_nan) { // make sure we fill tha raster with NaNs if the fill value is the nodata value if (_meta.nodata.has_value() && fillValue == 
static_cast<T>(*_meta.nodata)) { fillValue = NaN; } } fill(fillValue); } DenseRaster(int32_t rows, int32_t cols, std::span<const T> data) : DenseRaster(RasterMetadata(rows, cols), data) { } DenseRaster(const RasterMetadata& meta, std::span<const T> data) : _meta(meta) , _data(meta.rows * meta.cols) { throw_on_datasize_mismatch(meta.rows, meta.cols, data.size()); std::copy(data.begin(), data.end(), _data.data()); init_nodata_values(); } DenseRaster(const RasterMetadata& meta, data_type&& data) : _meta(meta) , _data(data) { if (inf::truncate<int32_t>(_data.size()) != meta.rows * meta.cols) { throw InvalidArgument("Invalid data size provided"); } init_nodata_values(); } DenseRaster(DenseRaster<T>&&) noexcept = default; DenseRaster(const DenseRaster<T>& other) = delete; DenseRaster& operator=(DenseRaster<T>&&) = default; DenseRaster& operator=(const DenseRaster<T>& other) = delete; void resize_and_fill(int32_t rows, int32_t cols, value_type value) { resize(rows, cols); fill(value); } void resize(int32_t rows, int32_t cols) { _meta.rows = rows; _meta.cols = cols; _data.resize(rows * cols); } void resize(int32_t rows, int32_t cols, std::optional<double> nodata) { _meta.rows = rows; _meta.cols = cols; _meta.nodata = nodata; _data.resize(rows * cols); } void set_metadata(RasterMetadata meta) { if (meta.rows * meta.cols != ssize()) { throw InvalidArgument("Cannot change metadata: invalid size"); } _meta = std::move(meta); } DenseRaster<T> copy() const { DenseRaster<T> dst(_meta); dst._data = _data; return dst; } auto begin() { return _data.begin(); } auto begin() const { return cbegin(); } auto cbegin() const { return _data.cbegin(); } auto end() { return _data.end(); } auto end() const { return cend(); } auto cend() const { return _data.cend(); } const value_type* data() const noexcept { return _data.data(); } value_type* data() noexcept { return _data.data(); } bool has_nodata() const noexcept { if (_meta.nodata.has_value()) { if constexpr (raster_type_has_nan) { return 
std::any_of(begin(), end(), [](T value) { return std::isnan(value); }); } else { return std::any_of(begin(), end(), [nod = static_cast<T>(*_meta.nodata)](T value) { return value == nod; }); } } return false; } std::optional<T> nodata() const noexcept { return inf::optional_cast<T>(_meta.nodata); } std::size_t size() const noexcept { return _data.size(); } std::ptrdiff_t ssize() const noexcept { assert(_data.size() <= std::size_t(std::numeric_limits<std::ptrdiff_t>::max())); return static_cast<std::ptrdiff_t>(_data.size()); } bool empty() const noexcept { return _data.size() == 0; } void collapse_data() { // no collapse needed for non floating point types if constexpr (has_nan()) { if (auto nod = nodata(); nod.has_value() && !std::isnan(*nod)) { if constexpr (simd_supported()) { simd::for_each(begin(), end(), [nodata = *nod](auto& value) { value(std::isnan(value)) = nodata; }); } else { std::transform(begin(), end(), begin(), [nodata = *nod](T value) { return std::isnan(value) ? nodata : value; }); } } } } const RasterMetadata& metadata() const noexcept { return _meta; } void set_projection(int32_t epsg) { _meta.set_projection_from_epsg(epsg); } void clear_projection() { _meta.projection.clear(); } void set_nodata(double newValue) { if constexpr (!raster_type_has_nan) { if (std::isnan(newValue)) { throw InvalidArgument("Nodata value cannot be NaN for integral rasters"); } } _meta.nodata = newValue; } void replace_nodata(T newValue) { const auto dataSize = _data.size(); for (std::size_t i = 0; i < dataSize; ++i) { if (is_nodata(i)) { _data[i] = newValue; } } _meta.nodata.reset(); } void turn_value_into_nodata(T value) { const auto dataSize = _data.size(); for (std::size_t i = 0; i < dataSize; ++i) { if (_data[i] == value) { mark_as_nodata(i); } } } // assigns the value to all the elements of the raster, even nodata void fill(value_type value) { std::fill(_data.begin(), _data.end(), value); } // assigns the value to all the elements of the raster, leaving nodata 
values intact void fill_values(value_type value) { if (auto nod = nodata(); nod.has_value()) { if constexpr (simd_supported()) { simd::for_each(_data.begin(), _data.end(), [value, nod = *nod](auto& v) { v(v != nod) = value; }); } else { std::for_each(begin(), end(), [=](auto& v) { if (!is_nodata_value(v)) { v = value; } }); } } else { return fill(value); } } // Makes all elements of the raster nodata values void fill_with_nodata() { if (_meta.nodata.has_value()) { if constexpr (raster_type_has_nan) { fill(NaN); } else { fill(static_cast<T>(*_meta.nodata)); } } } int32_t rows() const noexcept { return _meta.rows; } int32_t cols() const noexcept { return _meta.cols; } void mark_as_data(std::size_t /*index*/) noexcept { } void mark_as_data(Cell /*cell*/) noexcept { } void mark_as_data(int32_t /*row*/, int32_t /*col*/) noexcept { } void mark_as_nodata(std::size_t index) { if (!_meta.nodata.has_value()) { throw RuntimeError("mark_as_nodata called without nodata defined"); } if constexpr (raster_type_has_nan) { _data[index] = NaN; } else { _data[index] = static_cast<T>(*_meta.nodata); } } void mark_as_nodata(int32_t row, int32_t col) { mark_as_nodata(index(row, col)); } void mark_as_nodata(Cell cell) { mark_as_nodata(cell.r, cell.c); } std::optional<value_type> optional_value(std::size_t index) const noexcept { if (is_nodata(index)) { return std::optional<value_type>(); } else { return _data[index]; } } template <typename VarType> std::optional<VarType> optional_value_as(std::size_t index) const noexcept { if (is_nodata(index)) { return std::optional<VarType>(); } else { return static_cast<VarType>(_data[index]); } } bool is_nodata_value(T value) const noexcept { if constexpr (raster_type_has_nan) { return std::isnan(value); } else { if (_meta.nodata.has_value()) { return value == *_meta.nodata; } else { return false; } } } bool is_nodata(std::size_t index) const noexcept { if (_meta.nodata.has_value()) { if constexpr (raster_type_has_nan) { return 
std::isnan(_data[index]); } else { return _data[index] == static_cast<T>(*_meta.nodata); } } return false; } bool is_nodata(const Cell& cell) const noexcept { return is_nodata(cell.r, cell.c); } bool is_nodata(int32_t r, int32_t c) const noexcept { if (_meta.nodata.has_value()) { if constexpr (raster_type_has_nan) { return std::isnan(_data[index(r, c)]); } else { return _data[index(r, c)] == static_cast<T>(*_meta.nodata); } } return false; } bool tolerant_equal_to(const DenseRaster<T>& other, value_type tolerance = std::numeric_limits<value_type>::epsilon()) const noexcept { if (_meta != other._meta) { return false; } return tolerant_data_equal_to(other, tolerance); } bool tolerant_data_equal_to(const DenseRaster<T>& other, value_type relTolerance = value_type(1e-05)) const noexcept { throw_on_size_mismatch(*this, other); cpu::float_equal_to<T> comp(relTolerance); const auto dataSize = size(); for (std::size_t i = 0; i < dataSize; ++i) { if (is_nodata(i) != other.is_nodata(i)) { return false; } if (!is_nodata(i) && !comp(_data[i], other[i])) { return false; } } return true; } /* Add the value to the cell, if the cell is nodata it will become data with the provided value */ void add_to_cell(Cell c, T value) { if (is_nodata(c)) { (*this)[c] = value; } else { (*this)[c] += value; } } bool operator==(const DenseRaster<T>& other) const noexcept { throw_on_size_mismatch(*this, other); const auto dataSize = size(); for (std::size_t i = 0; i < dataSize; ++i) { if (is_nodata(i) != other.is_nodata(i)) { return false; } if (!is_nodata(i) && (_data[i] != other[i])) { return false; } } return true; } bool operator!=(const DenseRaster<T>& other) const noexcept { return !(*this == other); } DenseRaster<uint8_t> not_equals(const DenseRaster<T>& other) const noexcept { throw_on_size_mismatch(*this, other); return perform_binary_operation<nodata::not_equal_to>(other); } template <typename TValue> DenseRaster<uint8_t> not_equals(TValue value) const { 
static_assert(std::is_scalar_v<TValue>, "Arithmetic operation called with non scalar type");
    return perform_unary_operation<nodata::not_equal_to>(value);
}

// Element-wise addition of two rasters; nodata propagation is handled by
// perform_raster_operation.
template <typename TOther>
auto operator+(const DenseRaster<TOther>& other) const
{
    return perform_raster_operation<std::plus<>>(other);
}

template <typename TValue>
auto operator+(TValue value) const
{
    static_assert(std::is_scalar_v<TValue>, "Arithmetic operation called with non scalar type");
    return perform_scalar_operation<std::plus<>>(value);
}

template <typename TValue>
DenseRaster<T>& operator+=(TValue value)
{
    return perform_scalar_operation_inplace<std::plus<>>(value);
}

//! Add values of the other raster to this raster
// - Nodata values of this raster will not be assigned
// - Nodata values of the other raster will become nodata in the result
template <typename TOther>
DenseRaster<T>& operator+=(const DenseRaster<TOther>& other)
{
    return perform_raster_operation_in_place<std::plus<>>(other);
}

//! Add values of the other raster to this raster
// - Nodata values of this raster will become the value in the other raster
// - Nodata values of the other raster will become nodata in the result
template <typename TOther>
DenseRaster<T>& add_or_assign(const DenseRaster<TOther>& other)
{
    throw_on_size_mismatch(*this, other);

    if constexpr (simd_supported() && has_nan() && DenseRaster<TOther>::has_nan() && sizeof(T) == sizeof(TOther)) {
        // SIMD path (both rasters float-like with NaN nodata, same lane width):
        // lanes that are NaN here but data in the other raster are first reset
        // to 0, then every lane that is data in the other raster is accumulated.
        simd::transform(cbegin(), cend(), other.cbegin(), begin(), [](auto& v1, auto& v2) {
            auto w = Vc::simd_cast<Vc::Vector<T, typename std::remove_reference_t<decltype(v1)>::abi>>(v2);
            auto out = v1;
            out(Vc::isnan(v1) && !Vc::isnan(w)) = T(0);
            out(!Vc::isnan(w)) += w;
            return out;
        });
    } else {
        const auto dataSize = size();
        for (std::size_t i = 0; i < dataSize; ++i) {
            if (other.is_nodata(i)) {
                continue;
            }

            if (is_nodata(i)) {
                _data[i] = static_cast<T>(other[i]);
            } else {
                _data[i] += static_cast<T>(other[i]);
            }
        }
    }

    return *this;
}

// Scalar overload of add_or_assign: nodata cells are assigned the scalar,
// data cells are incremented by it (declaration continues on the next line).
template <typename TOther>
DenseRaster<T>&
add_or_assign(TOther value)
{
    static_assert(std::is_scalar_v<TOther>, "add_or_assign has to be called with a scalar value");
    const auto val = static_cast<T>(value);

    if constexpr (simd_supported() && has_nan()) {
        simd::transform(cbegin(), cend(), begin(), [val](auto& v) {
            auto out = v;
            out(Vc::isnan(v)) = val;
            out(!Vc::isnan(v)) += val;
            return out;
        });
    } else {
        const auto dataSize = size();
        for (std::size_t i = 0; i < dataSize; ++i) {
            if (is_nodata(i)) {
                _data[i] = val;
            } else {
                _data[i] += val;
            }
        }
    }

    return *this;
}

// Unary minus; rejected for unsigned rasters, otherwise applies the
// nodata-aware negate functor to a copy of the data.
DenseRaster<T> operator-() const
{
    if constexpr (std::is_unsigned_v<T>) {
        throw RuntimeError("Minus operator applied to unsigned value");
    } else {
        DenseRaster<T> result(_meta, DenseRaster<T>::data_type(_data));
        std::transform(result.begin(), result.end(), result.begin(), nodata::negate<T>(_meta.nodata));
        return result;
    }
}

template <typename TOther>
auto operator-(const DenseRaster<TOther>& other) const
{
    return perform_raster_operation<std::minus<>>(other);
}

template <typename TValue>
auto operator-(TValue value) const
{
    static_assert(std::is_scalar_v<TValue>, "Arithmetic operation called with non scalar type");
    return perform_scalar_operation<std::minus<>>(value);
}

template <typename TValue>
DenseRaster<T>& operator-=(TValue value)
{
    return perform_scalar_operation_inplace<std::minus<>>(value);
}

template <typename TOther>
DenseRaster<T>& operator-=(const DenseRaster<TOther>& other)
{
    return perform_raster_operation_in_place<std::minus<>>(other);
}

template <typename TOther>
auto operator*(const DenseRaster<TOther>& other) const
{
    return perform_raster_operation<std::multiplies<>>(other);
}

template <typename TValue>
auto operator*(TValue value) const
{
    static_assert(std::is_scalar_v<TValue>, "Arithmetic operation called with non scalar type");
    return perform_scalar_operation<std::multiplies<>>(value);
}

template <typename TValue>
DenseRaster<T>& operator*=(TValue value)
{
    return perform_scalar_operation_inplace<std::multiplies<>>(value);
}

// (declaration continues on the next source line)
template
<typename TOther>
DenseRaster<T>& operator*=(const DenseRaster<TOther>& other)
{
    return perform_raster_operation_in_place<std::multiplies<>>(other);
}

template <typename TOther>
auto operator/(const DenseRaster<TOther>& other) const
{
    return perform_raster_operation<std::divides<>>(other);
}

// Scalar division; a zero divisor is rejected up front.
template <typename TValue>
auto operator/(TValue value) const
{
    static_assert(std::is_scalar_v<TValue>, "Arithmetic operation called with non scalar type");
    if (value == 0) {
        throw InvalidArgument("Division by zero");
    }

    return perform_scalar_operation<std::divides<>>(value);
}

template <typename TValue>
DenseRaster<T>& operator/=(TValue value)
{
    return perform_scalar_operation_inplace<std::divides<>>(value);
}

template <typename TOther>
DenseRaster<T>& operator/=(const DenseRaster<TOther>& other)
{
    return perform_raster_operation_in_place<std::divides<>>(other);
}

// Unchecked element access by linear index, cell or (row, col) pair.
value_type& operator[](std::size_t index)
{
    return _data[index];
}

value_type operator[](std::size_t index) const
{
    return _data[index];
}

value_type& operator[](const Cell& cell)
{
    return _data[index(cell.r, cell.c)];
}

const value_type& operator[](const Cell& cell) const
{
    return _data[index(cell.r, cell.c)];
}

value_type& operator()(int32_t row, int32_t col)
{
    return _data[index(row, col)];
}

const value_type& operator()(int32_t row, int32_t col) const
{
    return _data[index(row, col)];
}

// Logical operators return uint8 mask rasters via the nodata-aware predicates.
DenseRaster<uint8_t> operator!() const
{
    return perform_unary_operation<nodata::logical_not>();
}

template <typename TOther>
DenseRaster<uint8_t> operator&&(const DenseRaster<TOther>& other) const
{
    return perform_binary_operation<nodata::logical_and>(other);
}

template <typename TOther>
DenseRaster<uint8_t> operator||(const DenseRaster<TOther>& other) const
{
    return perform_binary_operation<nodata::logical_or>(other);
}

// Comparison operators: raster-vs-raster and raster-vs-threshold variants,
// each returning a uint8 mask raster.
template <typename TOther>
DenseRaster<uint8_t> operator>(const DenseRaster<TOther>& other) const
{
    return perform_binary_operation<nodata::greater>(other);
}

DenseRaster<uint8_t> operator>(T threshold) const
{
    return perform_unary_operation<nodata::greater>(threshold);
}

template <typename TOther>
DenseRaster<uint8_t> operator>=(const DenseRaster<TOther>& other) const
{
    return perform_binary_operation<nodata::greater_equal>(other);
}

DenseRaster<uint8_t> operator>=(T threshold) const
{
    return perform_unary_operation<nodata::greater_equal>(threshold);
}

template <typename TOther>
DenseRaster<uint8_t> operator<(const DenseRaster<TOther>& other) const
{
    return perform_binary_operation<nodata::less>(other);
}

DenseRaster<uint8_t> operator<(T threshold) const
{
    return perform_unary_operation<nodata::less>(threshold);
}

template <typename TOther>
DenseRaster<uint8_t> operator<=(const DenseRaster<TOther>& other) const
{
    return perform_binary_operation<nodata::less_equal>(other);
}

DenseRaster<uint8_t> operator<=(T threshold) const
{
    return perform_unary_operation<nodata::less_equal>(threshold);
}

// Replaces every occurrence of oldValue with newValue (nodata-unaware).
void replace(T oldValue, T newValue) noexcept
{
    std::replace(begin(), end(), oldValue, newValue);
}

// Renders the raster row by row as comma separated values. uint8 rasters are
// widened to uint16 first so they print as numbers instead of characters.
std::string to_string() const
{
    if constexpr (std::is_same_v<uint8_t, T>) {
        DenseRaster<uint16_t> copy(_meta);
        std::copy(begin(), end(), copy.begin());
        return copy.to_string();
    } else {
        std::stringstream ss;
        for (int i = 0; i < rows(); ++i) {
            std::span<const T> row(&_data[size_t(i) * cols()], cols());
            ss << inf::str::join(row, ", ") << "\n";
        }

        return ss.str();
    }
}

// For floating point rasters with a non-NaN nodata value: rewrites all cells
// holding that sentinel to NaN, the internal nodata representation.
void init_nodata_values()
{
    if constexpr (raster_type_has_nan) {
        if (auto nodataOpt = nodata(); nodataOpt.has_value() && !std::isnan(*nodataOpt)) {
            simd::for_each(begin(), end(), [nod = *nodataOpt](auto& v) {
                v(v == nod) = NaN;
            });
        }
    }
}

// Sum of all data values, skipping nodata lanes via SIMD masks
// (body continues on the following source line).
template <typename TResult = T>
TResult sum() const
{
    auto result = TResult(0);
    if (!nodata().has_value()) {
        simd::for_each(begin(), end(), [&result](const auto& v) {
            result += v.sum();
        });
    } else {
        if constexpr (raster_type_has_nan) {
            simd::for_each(begin(), end(), [&result](const auto& v) {
                result += v.sum(!Vc::isnan(v));
            });
        } else {
            simd::for_each(begin(), end(), [&result, nod =
*nodata()](const auto& v) { result += v.sum(v != nod); }); } } return result; } private: std::size_t index(int32_t row, int32_t col) const { return row * cols() + col; } static void throw_on_datasize_mismatch(int32_t rows, int32_t cols, size_t dataSize) { if (static_cast<size_t>(size_t(rows) * cols) != dataSize) { throw InvalidArgument("Raster data size does not match provided dimensions {} vs {}x{}", dataSize, rows, cols); } } template <typename T1, typename T2> static constexpr bool floating_point_simd_supported() { return DenseRaster<T1>::simd_supported() && DenseRaster<T2>::simd_supported() && DenseRaster<T1>::has_nan() && DenseRaster<T2>::has_nan() && sizeof(T1) == sizeof(T2); } template <typename T1, typename T2> static constexpr bool integral_simd_supported() { return DenseRaster<T1>::simd_supported() && DenseRaster<T2>::simd_supported() && !DenseRaster<T1>::has_nan() && !DenseRaster<T2>::has_nan() && sizeof(T1) == sizeof(T2); } template <typename BinaryPredicate, typename TOther, typename TResult> void fp_simd_raster_operation(const DenseRaster<TOther>& other, DenseRaster<TResult>& result) const { static_assert(simd_supported() && DenseRaster<TOther>::simd_supported() && DenseRaster<TResult>::simd_supported(), "simd operation called with non supporting types"); static_assert(has_nan() && DenseRaster<TOther>::has_nan() && DenseRaster<TResult>::has_nan(), "floating point simd operation called with non floating point types"); using IsDivision = std::conditional_t<std::is_same_v<BinaryPredicate, std::divides<>>, std::true_type, std::false_type>; simd::transform(begin(), end(), other.begin(), result.begin(), [](const auto& v1, const auto& v2) { auto w1 = Vc::simd_cast<Vc::Vector<TResult, typename std::decay_t<decltype(v1)>::abi>>(v1); auto w2 = Vc::simd_cast<Vc::Vector<TResult, typename std::decay_t<decltype(v2)>::abi>>(v2); auto res = BinaryPredicate()(w1, w2); if constexpr (IsDivision::value) { res(w2 == 0) = DenseRaster<TResult>::NaN; } return res; }); } 
// SIMD implementation of a binary raster operation for integral types: nodata
// has to be masked explicitly because there is no NaN to propagate it.
template <typename BinaryPredicate, typename TOther, typename TResult>
void int_simd_raster_operation(const DenseRaster<TOther>& other, DenseRaster<TResult>& result) const
{
    static_assert(simd_supported() && DenseRaster<TOther>::simd_supported() && DenseRaster<TResult>::simd_supported(), "simd operation called with non supporting types");
    static_assert(!has_nan() && !DenseRaster<TOther>::has_nan(), "integral simd operation called with non integral types");

    using IsDivision = std::conditional_t<std::is_same_v<BinaryPredicate, std::divides<>>, std::true_type, std::false_type>;

    if (!nodata().has_value() || !other.nodata().has_value()) {
        // fallback to non simd implementation for other combinations
        fallback_raster_operation<BinaryPredicate>(other, result);
        return;
    }

    // when result has nan, the nodata value should also be nan (this is only the case for divisions)
    if constexpr (DenseRaster<TResult>::has_nan()) {
        static_assert(IsDivision::value);
        assert(std::isnan(result.nodata().value()));
    }

    simd::transform(begin(), end(), other.begin(), result.begin(),
                    [nod = result.nodata().value(), nod1 = nodata().value(), nod2 = other.nodata().value()](const auto& v1, const auto& v2) {
        auto w1 = Vc::simd_cast<Vc::Vector<TResult, typename std::decay_t<decltype(v1)>::abi>>(v1);
        auto w2 = Vc::simd_cast<Vc::Vector<TResult, typename std::decay_t<decltype(v2)>::abi>>(v2);

        if constexpr (IsDivision::value) {
            // replace zero divisors with 1 so the division is safe, then mark
            // those lanes (and any nodata operand lanes) as nodata afterwards
            auto mask = w2 == 0;
            w2(mask)  = 1;
            auto out  = BinaryPredicate()(w1, w2);
            out(w1 == nod1 || w2 == nod2 || mask) = nod;
            return out;
        } else {
            auto out = BinaryPredicate()(w1, w2);
            out(w1 == nod1 || w2 == nod2) = nod;
            return out;
        }
    });
}

// Dispatches to the floating point or integral SIMD kernel, or the scalar
// fallback when the type combination does not support SIMD.
template <typename BinaryPredicate, typename TOther, typename TResult>
void simd_raster_operation(const DenseRaster<TOther>& other, DenseRaster<TResult>& result) const
{
    if constexpr (floating_point_simd_supported<T, TOther>()) {
        fp_simd_raster_operation<BinaryPredicate>(other, result);
    } else if constexpr (integral_simd_supported<T, TOther>()) {
        int_simd_raster_operation<BinaryPredicate>(other, result);
    } else {
        fallback_raster_operation<BinaryPredicate>(other, result);
    }
}

// Scalar (non SIMD) implementation of a binary raster operation: handles
// nodata propagation and divide-by-zero -> nodata per element.
template <typename BinaryPredicate, typename TOther, typename TResult>
auto fallback_raster_operation(const DenseRaster<TOther>& other, DenseRaster<TResult>& result) const
{
    using IsDivision = std::conditional_t<std::is_same_v<BinaryPredicate, std::divides<>>, std::true_type, std::false_type>;

    if (result.nodata().has_value()) {
        auto nod = result.nodata().value();
        if constexpr (DenseRaster<TResult>::has_nan()) {
            nod = DenseRaster<TResult>::NaN;
        }

        for (std::size_t i = 0; i < size(); ++i) {
            if (is_nodata(i) || other.is_nodata(i)) {
                result[i] = nod;
            } else {
                if constexpr (IsDivision::value) {
                    if (other[i] == 0) {
                        result.mark_as_nodata(i);
                        continue;
                    }
                }

                result[i] = BinaryPredicate()(static_cast<TResult>(_data[i]), static_cast<TResult>(other[i]));
            }
        }
    } else {
        assert(!IsDivision::value);
        assert(!nodata().has_value() && !other.nodata().has_value());
        // the result does not have nodata this means the input rasters also do not have nodata
        std::transform(cbegin(), cend(), other.cbegin(), result.begin(), [](auto& v1, auto& v2) {
            return BinaryPredicate()(static_cast<TResult>(v1), static_cast<TResult>(v2));
        });
    }
}

// Entry point for binary raster operations: picks SIMD when both element
// types support it, otherwise the scalar fallback.
template <typename BinaryPredicate, typename TOther, typename TResult>
void raster_operation(const DenseRaster<TOther>& other, DenseRaster<TResult>& result) const
{
    constexpr bool simdSupported = simd_supported() && DenseRaster<TOther>::simd_supported();

    if constexpr (simdSupported) {
        simd_raster_operation<BinaryPredicate>(other, result);
    } else {
        // fallback to non simd implementation for other combinations
        fallback_raster_operation<BinaryPredicate>(other, result);
    }
}

// Performs a unary operation on all the elements that results in true or false
// (body continues on the following source line).
template <template <typename> typename BinaryPredicate, typename TOther>
DenseRaster<uint8_t> perform_unary_operation(TOther value) const
{
    DenseRaster<uint8_t> result(_meta);
    if
(_meta.nodata.has_value()) {
        // uint8 mask rasters use 255 as their nodata sentinel
        result.set_nodata(static_cast<double>(std::numeric_limits<uint8_t>::max()));
    }

    auto pred = BinaryPredicate<T>(_meta.nodata, std::optional<double>());

    const auto size = result.size();
// NOTE(review): unsigned (std::size_t) loop variable on a parallel for
// requires OpenMP 3.0+; older OpenMP 2.0 compilers would reject it — confirm
// the supported toolchains.
#pragma omp parallel for
    for (std::size_t i = 0; i < size; ++i) {
        result[i] = pred(_data[i], static_cast<T>(value));
    }

    return result;
}

// Applies a nodata-aware unary predicate to every element, producing a uint8
// mask raster (255 = nodata when a nodata value is configured).
template <template <typename> typename UnaryPredicate>
DenseRaster<uint8_t> perform_unary_operation() const
{
    DenseRaster<uint8_t> result(_meta);
    if (_meta.nodata) {
        result.set_nodata(static_cast<double>(std::numeric_limits<uint8_t>::max()));
    }

    std::transform(cbegin(), cend(), result.begin(), UnaryPredicate<T>(_meta.nodata));
    return result;
}

// Applies a nodata-aware binary predicate element-wise against another raster,
// producing a uint8 mask raster; both operands are widened to their common type.
template <template <typename> typename BinaryPredicate, typename TOther>
DenseRaster<uint8_t> perform_binary_operation(const DenseRaster<TOther>& other) const
{
    throw_on_size_mismatch(*this, other);
    using WidestType = decltype(T() * TOther());

    DenseRaster<uint8_t> result(_meta);
    if (_meta.nodata.has_value() || other.metadata().nodata.has_value()) {
        result.set_nodata(std::numeric_limits<uint8_t>::max());
    }

    auto pred = BinaryPredicate<WidestType>(_meta.nodata, other.metadata().nodata);

    const auto size = result.size();
#pragma omp parallel for
    for (std::size_t i = 0; i < size; ++i) {
        result[i] = pred(static_cast<WidestType>(_data[i]), static_cast<WidestType>(other[i]));
    }

    return result;
}

// Applies a binary predicate between every element and a scalar. Three paths:
// scalar loop (no SIMD or lane width changes), SIMD without nodata masking
// (NaN nodata or no nodata), SIMD with explicit nodata masking.
template <typename BinaryPredicate, typename TScalar>
auto perform_scalar_operation(TScalar scalar) const
{
    using ResultType = decltype(BinaryPredicate()(T(), TScalar()));

    DenseRaster<ResultType> result(_meta);

    if constexpr (!simd_supported() || sizeof(ResultType) != sizeof(T)) {
        std::transform(begin(), end(), result.begin(), [this, scalar](T value) {
            if (is_nodata_value(value)) {
                return static_cast<ResultType>(value);
            }

            return BinaryPredicate()(value, scalar);
        });
    } else if (has_nan() || !nodata().has_value()) {
        // NaN nodata propagates through the arithmetic by itself
        simd::transform(begin(), end(), result.begin(), [scalar](auto v) {
            using ResultVectorType = Vc::Vector<ResultType, typename decltype(v)::abi>;
            return BinaryPredicate()(Vc::simd_cast<ResultVectorType>(v), scalar);
        });
    } else {
        assert(nodata().has_value());
        // integral nodata: restore the sentinel on lanes that held it
        simd::transform(begin(), end(), result.begin(), [scalar, nod = *nodata()](auto v) {
            using ResultVectorType = Vc::Vector<ResultType, typename decltype(v)::abi>;
            auto w   = Vc::simd_cast<ResultVectorType>(v);
            auto out = BinaryPredicate()(w, scalar);
            out(w == nod) = nod;
            return out;
        });
    }

    return result;
}

// In-place variant of perform_scalar_operation, mutating this raster.
template <typename BinaryPredicate, typename TScalar>
DenseRaster<T>& perform_scalar_operation_inplace(TScalar scalar)
{
    static_assert(std::is_scalar_v<TScalar>, "Arithmetic operation called with non scalar type");

    if constexpr (!simd_supported()) {
        std::for_each(begin(), end(), [this, scalar](T& value) {
            if (is_nodata_value(value)) {
                return;
            }

            value = BinaryPredicate()(value, scalar);
        });
    } else if (has_nan() || !nodata().has_value()) {
        simd::for_each(begin(), end(), [scalar](auto& value) {
            value = BinaryPredicate()(value, scalar);
        });
    } else {
        assert(nodata().has_value());
        // masked update: only lanes that are not the nodata sentinel change
        simd::for_each(begin(), end(), [scalar, nod = *nodata()](auto& value) {
            value(value != nod) = BinaryPredicate()(value, scalar);
        });
    }

    return *this;
}

// In-place binary raster operation; result is written into this raster.
template <typename BinaryPredicate, typename TOther>
DenseRaster<T>& perform_raster_operation_in_place(const DenseRaster<TOther>& other)
{
    throw_on_size_mismatch(*this, other);
    detail::assign_nodata_value(*this, other, *this);

    // Division is special: divide by zero becomes nodata
    using IsDivision = std::conditional_t<std::is_same_v<BinaryPredicate, std::divides<>>, std::true_type, std::false_type>;
    if constexpr (IsDivision::value) {
        if (!_meta.nodata.has_value()) {
            _meta.nodata = detail::nodata_for_type<T>();
        }
    }

    raster_operation<BinaryPredicate>(other, *this);
    return *this;
}

// Binary raster operation producing a new raster; division always yields a
// floating point result with NaN nodata (continues on the next source line).
template <typename BinaryPredicate, typename TOther>
auto perform_raster_operation(const DenseRaster<TOther>& other) const
{
    throw_on_size_mismatch(*this, other);

    using IsDivision =
std::conditional_t<std::is_same_v<BinaryPredicate, std::divides<>>, std::true_type, std::false_type>;
    // division promotes to at least float; other operations use the natural
    // result type of the predicate applied to the two element types
    using DivType = decltype(BinaryPredicate()(1.f, std::common_type_t<T, TOther>()));
    using Type    = decltype(BinaryPredicate()(T(), TOther()));
    using TResult = std::conditional_t<IsDivision::value, DivType, Type>;

    DenseRaster<TResult> result(_meta);

    if constexpr (IsDivision::value) {
        result.set_nodata(DenseRaster<TResult>::NaN);
    } else {
        detail::assign_nodata_value(*this, other, result);
    }

    raster_operation<BinaryPredicate>(other, result);
    return result;
}

// Raster metadata (dimensions, nodata, ...) and the dense cell storage.
RasterMetadata _meta;
data_type _data;
};

// scalar + raster (commutative, forwards to the member operator)
// NOTE(review): declared to return DenseRaster<T> while rhs + lhs may yield a
// raster of a wider element type — confirm the implicit conversion is intended.
template <typename TScalar, typename T, typename = std::enable_if_t<std::is_scalar_v<TScalar>>>
DenseRaster<T> operator+(TScalar lhs, const DenseRaster<T>& rhs)
{
    return rhs + lhs;
}

// scalar - raster (not commutative, uses the nodata-aware functor)
template <typename TScalar, typename T, typename = std::enable_if_t<std::is_scalar_v<TScalar>>>
auto operator-(TScalar value, const DenseRaster<T>& rhs)
{
    using ResultType = decltype(TScalar() - T());
    DenseRaster<ResultType> result(rhs.metadata());
    std::transform(begin(rhs), end(rhs), begin(result), nodata::minus_scalar_first<ResultType>(rhs.metadata().nodata, static_cast<ResultType>(value)));
    return result;
}

// scalar * raster (commutative, forwards to the member operator)
template <typename TScalar, typename T, typename = std::enable_if_t<std::is_scalar_v<TScalar>>>
DenseRaster<T> operator*(TScalar lhs, const DenseRaster<T>& rhs)
{
    return rhs * lhs;
}

// scalar / raster: element-wise reciprocal scaled by the scalar; zero cells
// become nodata, or throw when the result raster has no nodata configured.
template <typename TScalar, typename T, typename = std::enable_if_t<std::is_scalar_v<TScalar>>>
auto operator/(TScalar scalar, const DenseRaster<T>& rhs)
{
    //throw_on_size_mismatch(other);

    //// For nan nodata, standard eigen operator can be used
    //if constexpr (has_nan() && std::is_same_v<T, TOther>) {
    //    // all types are the same, no casts needed
    //    return DenseRaster<T>(_meta, _data / other._data);
    //}

    //return performRasterOperation<nodata::divides>(other);

    using ResultType = decltype(1.0f * T());
    static_assert(std::is_scalar_v<T>, "Arithmetic operation called with non scalar type");

    DenseRaster<ResultType> result(rhs.metadata());
    for (std::size_t i = 0; i < rhs.size(); ++i) {
        auto value = rhs[i];
        if (value == 0) {
            if (!result.nodata().has_value()) {
                throw InvalidArgument("Division by raster that contains 0 values");
            }

            result.mark_as_nodata(i);
        } else {
            result[i] = scalar / static_cast<ResultType>(value);
        }
    }

    return result;
}

// Free iterator/data accessors so DenseRaster works with ADL-based generic code.
// NOTE(review): cbegin returns ras.data() while cend returns ras.cend() —
// consistent only if the member iterators are raw pointers; confirm.
template <typename T>
auto cbegin(const DenseRaster<T>& ras)
{
    return ras.data();
}

template <typename T>
auto cend(const DenseRaster<T>& ras)
{
    return ras.cend();
}

template <typename T>
auto begin(DenseRaster<T>& ras)
{
    return ras.begin();
}

template <typename T>
auto begin(const DenseRaster<T>& ras)
{
    return ras.begin();
}

template <typename T>
auto end(DenseRaster<T>& ras)
{
    return ras.end();
}

template <typename T>
auto end(const DenseRaster<T>& ras)
{
    return ras.cend();
}

template <typename T>
const T* data(const DenseRaster<T>& ras)
{
    return ras.data();
}

template <typename T>
T* data(DenseRaster<T>& ras)
{
    return ras.data();
}

template <typename T>
auto size(const DenseRaster<T>& ras)
{
    return ras.size();
}
}
TSDFVoxelGridImpl.h
// ---------------------------------------------------------------------------- // - Open3D: www.open3d.org - // ---------------------------------------------------------------------------- // The MIT License (MIT) // // Copyright (c) 2018-2021 www.open3d.org // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. 
// ----------------------------------------------------------------------------

#include <atomic>

#include <cmath>

#include "open3d/core/Dispatch.h"
#include "open3d/core/Dtype.h"
#include "open3d/core/MemoryManager.h"
#include "open3d/core/SizeVector.h"
#include "open3d/core/Tensor.h"
#include "open3d/t/geometry/Utility.h"
#include "open3d/t/geometry/kernel/GeometryIndexer.h"
#include "open3d/t/geometry/kernel/GeometryMacros.h"
#include "open3d/t/geometry/kernel/TSDFVoxel.h"
#include "open3d/t/geometry/kernel/TSDFVoxelGrid.h"
#include "open3d/utility/Logging.h"
#include "open3d/utility/Timer.h"

namespace open3d {
namespace t {
namespace geometry {
namespace kernel {
namespace tsdf {

// Integrates one depth (and optionally color) frame into the TSDF voxel
// blocks listed in `indices`. Compiled twice: as IntegrateCUDA when built by
// nvcc, as IntegrateCPU otherwise.
#if defined(__CUDACC__)
void IntegrateCUDA
#else
void IntegrateCPU
#endif
        (const core::Tensor& depth,
         const core::Tensor& color,
         const core::Tensor& indices,
         const core::Tensor& block_keys,
         core::Tensor& block_values,
         // Transforms
         const core::Tensor& intrinsics,
         const core::Tensor& extrinsics,
         // Parameters
         int64_t resolution,
         float voxel_size,
         float sdf_trunc,
         float depth_scale,
         float depth_max) {
    // Parameters
    int64_t resolution3 = resolution * resolution * resolution;

    // Shape / transform indexers, no data involved
    NDArrayIndexer voxel_indexer({resolution, resolution, resolution});
    TransformIndexer transform_indexer(intrinsics, extrinsics, voxel_size);

    // Real data indexer
    NDArrayIndexer depth_indexer(depth, 2);
    NDArrayIndexer block_keys_indexer(block_keys, 1);
    NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);

    // Optional color integration
    NDArrayIndexer color_indexer;
    bool integrate_color = false;
    if (color.NumElements() != 0) {
        color_indexer = NDArrayIndexer(color, 2);
        integrate_color = true;
    }

    // Plain arrays that do not require indexers
    const int* indices_ptr = indices.GetDataPtr<int>();

    // One workload item per voxel of every active block
    int64_t n = indices.GetLength() * resolution3;

#if defined(__CUDACC__)
    namespace launcher = core::kernel::cuda_launcher;
#else
    namespace launcher = core::kernel::cpu_launcher;
#endif

    DISPATCH_BYTESIZE_TO_VOXEL(
            voxel_block_buffer_indexer.ElementByteSize(), [&]() {
                launcher::ParallelFor(n, [=] OPEN3D_DEVICE(
                                                 int64_t workload_idx) {
                    // Natural index (0, N) -> (block_idx, voxel_idx)
                    int block_idx = indices_ptr[workload_idx / resolution3];
                    int voxel_idx = workload_idx % resolution3;

                    /// Coordinate transform
                    // block_idx -> (x_block, y_block, z_block)
                    int* block_key_ptr =
                            block_keys_indexer.GetDataPtr<int>(block_idx);
                    int64_t xb = static_cast<int64_t>(block_key_ptr[0]);
                    int64_t yb = static_cast<int64_t>(block_key_ptr[1]);
                    int64_t zb = static_cast<int64_t>(block_key_ptr[2]);

                    // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                    int64_t xv, yv, zv;
                    voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);

                    // coordinate in world (in voxel)
                    int64_t x = (xb * resolution + xv);
                    int64_t y = (yb * resolution + yv);
                    int64_t z = (zb * resolution + zv);

                    // coordinate in camera (in voxel -> in meter)
                    float xc, yc, zc, u, v;
                    transform_indexer.RigidTransform(
                            static_cast<float>(x), static_cast<float>(y),
                            static_cast<float>(z), &xc, &yc, &zc);

                    // coordinate in image (in pixel)
                    transform_indexer.Project(xc, yc, zc, &u, &v);
                    if (!depth_indexer.InBoundary(u, v)) {
                        return;
                    }

                    // Associate image workload and compute SDF and TSDF.
                    float depth = *depth_indexer.GetDataPtr<float>(
                                          static_cast<int64_t>(u),
                                          static_cast<int64_t>(v)) /
                                  depth_scale;

                    float sdf = (depth - zc);
                    // Skip invalid depth, voxels behind the camera, and voxels
                    // more than one truncation band behind the surface.
                    if (depth <= 0 || depth > depth_max || zc <= 0 ||
                        sdf < -sdf_trunc) {
                        return;
                    }
                    // Truncate and normalize the SDF to [-1, 1]
                    // (expression continues on the following source line).
                    sdf = sdf < sdf_trunc ?
sdf : sdf_trunc;
                    sdf /= sdf_trunc;

                    // Associate voxel workload and update TSDF/Weights
                    voxel_t* voxel_ptr =
                            voxel_block_buffer_indexer.GetDataPtr<voxel_t>(
                                    xv, yv, zv, block_idx);

                    if (integrate_color) {
                        float* color_ptr = color_indexer.GetDataPtr<float>(
                                static_cast<int64_t>(u),
                                static_cast<int64_t>(v));
                        voxel_ptr->Integrate(sdf, color_ptr[0], color_ptr[1],
                                             color_ptr[2]);
                    } else {
                        voxel_ptr->Integrate(sdf);
                    }
                });
            });

#if defined(__CUDACC__)
    OPEN3D_CUDA_CHECK(cudaDeviceSynchronize());
#endif
}

// Extracts zero-crossing surface points (and optional normals/colors) from the
// active TSDF voxel blocks. `valid_size` < 0 triggers an extra counting pass
// to size the output; on return it holds the number of extracted points.
#if defined(__CUDACC__)
void ExtractSurfacePointsCUDA
#else
void ExtractSurfacePointsCPU
#endif
        (const core::Tensor& indices,
         const core::Tensor& nb_indices,
         const core::Tensor& nb_masks,
         const core::Tensor& block_keys,
         const core::Tensor& block_values,
         core::Tensor& points,
         utility::optional<std::reference_wrapper<core::Tensor>> normals,
         utility::optional<std::reference_wrapper<core::Tensor>> colors,
         int64_t resolution,
         float voxel_size,
         float weight_threshold,
         int& valid_size) {
    // Parameters
    int64_t resolution3 = resolution * resolution * resolution;

    // Shape / transform indexers, no data involved
    NDArrayIndexer voxel_indexer({resolution, resolution, resolution});

    // Real data indexer
    NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);
    NDArrayIndexer block_keys_indexer(block_keys, 1);
    NDArrayIndexer nb_block_masks_indexer(nb_masks, 2);
    NDArrayIndexer nb_block_indices_indexer(nb_indices, 2);

    // Plain arrays that do not require indexers
    const int64_t* indices_ptr = indices.GetDataPtr<int64_t>();

    int64_t n_blocks = indices.GetLength();
    int64_t n = n_blocks * resolution3;

    // Output point counter: a device tensor on CUDA, an atomic int on CPU.
#if defined(__CUDACC__)
    core::Tensor count(std::vector<int>{0}, {1}, core::Dtype::Int32,
                       block_values.GetDevice());
    int* count_ptr = count.GetDataPtr<int>();
#else
    std::atomic<int> count_atomic(0);
    std::atomic<int>* count_ptr = &count_atomic;
#endif

#if defined(__CUDACC__)
    namespace launcher = core::kernel::cuda_launcher;
#else
    namespace launcher = core::kernel::cpu_launcher;
#endif

    if (valid_size < 0) {
        utility::LogWarning(
                "No estimated max point cloud size provided, using a 2-pass "
                "estimation. Surface extraction could be slow.");
        // This pass determines valid number of points.
        DISPATCH_BYTESIZE_TO_VOXEL(
                voxel_block_buffer_indexer.ElementByteSize(), [&]() {
                    launcher::ParallelFor(n, [=] OPEN3D_DEVICE(
                                                     int64_t workload_idx) {
                        // Looks up a voxel by offset, crossing into neighbor
                        // blocks when the offset leaves the current block.
                        auto GetVoxelAt = [&] OPEN3D_DEVICE(
                                                  int xo, int yo, int zo,
                                                  int curr_block_idx)
                                -> voxel_t* {
                            return DeviceGetVoxelAt<voxel_t>(
                                    xo, yo, zo, curr_block_idx,
                                    static_cast<int>(resolution),
                                    nb_block_masks_indexer,
                                    nb_block_indices_indexer,
                                    voxel_block_buffer_indexer);
                        };

                        // Natural index (0, N) -> (block_idx,
                        // voxel_idx)
                        int64_t workload_block_idx = workload_idx / resolution3;
                        int64_t block_idx = indices_ptr[workload_block_idx];
                        int64_t voxel_idx = workload_idx % resolution3;

                        // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                        int64_t xv, yv, zv;
                        voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv,
                                                      &zv);

                        voxel_t* voxel_ptr =
                                voxel_block_buffer_indexer.GetDataPtr<voxel_t>(
                                        xv, yv, zv, block_idx);
                        float tsdf_o = voxel_ptr->GetTSDF();
                        float weight_o = voxel_ptr->GetWeight();
                        if (weight_o <= weight_threshold) return;

                        // Enumerate x-y-z directions: a sign change of the
                        // TSDF along an axis marks a surface crossing.
                        for (int i = 0; i < 3; ++i) {
                            voxel_t* ptr = GetVoxelAt(
                                    static_cast<int>(xv) + (i == 0),
                                    static_cast<int>(yv) + (i == 1),
                                    static_cast<int>(zv) + (i == 2),
                                    static_cast<int>(workload_block_idx));
                            if (ptr == nullptr) continue;

                            float tsdf_i = ptr->GetTSDF();
                            float weight_i = ptr->GetWeight();

                            if (weight_i > weight_threshold &&
                                tsdf_i * tsdf_o < 0) {
                                OPEN3D_ATOMIC_ADD(count_ptr, 1);
                            }
                        }
                    });
                });

#if defined(__CUDACC__)
        valid_size = count[0].Item<int>();
        count[0] = 0;
#else
        valid_size = (*count_ptr).load();
        (*count_ptr) = 0;
#endif
    }

    int max_count = valid_size;
    if (points.GetLength() == 0) {
        points = core::Tensor({max_count, 3}, core::Dtype::Float32,
                              block_values.GetDevice());
    }
    NDArrayIndexer point_indexer(points, 1);

    // Normals (optional output; allocated lazily when not preallocated,
    // continues on the following source line)
    bool extract_normal = false;
    NDArrayIndexer normal_indexer;
    if
(normals.has_value()) {
        extract_normal = true;
        if (normals.value().get().GetLength() == 0) {
            normals.value().get() = core::Tensor({max_count, 3},
                                                 core::Dtype::Float32,
                                                 block_values.GetDevice());
        }
        normal_indexer = NDArrayIndexer(normals.value().get(), 1);
    }

    // This pass extracts exact surface points.
    DISPATCH_BYTESIZE_TO_VOXEL(
            voxel_block_buffer_indexer.ElementByteSize(), [&]() {
                // Colors (optional output; only when the voxel type stores color)
                bool extract_color = false;
                NDArrayIndexer color_indexer;
                if (voxel_t::HasColor() && colors.has_value()) {
                    extract_color = true;
                    if (colors.value().get().GetLength() == 0) {
                        colors.value().get() = core::Tensor(
                                {max_count, 3}, core::Dtype::Float32,
                                block_values.GetDevice());
                    }
                    color_indexer = NDArrayIndexer(colors.value().get(), 1);
                }

                launcher::ParallelFor(n, [=] OPEN3D_DEVICE(
                                                 int64_t workload_idx) {
                    // Voxel lookup across block boundaries via neighbor tables.
                    auto GetVoxelAt = [&] OPEN3D_DEVICE(
                                              int xo, int yo, int zo,
                                              int curr_block_idx) -> voxel_t* {
                        return DeviceGetVoxelAt<voxel_t>(
                                xo, yo, zo, curr_block_idx,
                                static_cast<int>(resolution),
                                nb_block_masks_indexer,
                                nb_block_indices_indexer,
                                voxel_block_buffer_indexer);
                    };

                    // Finite-difference TSDF gradient -> (unnormalized) normal.
                    auto GetNormalAt = [&] OPEN3D_DEVICE(int xo, int yo,
                                                         int zo,
                                                         int curr_block_idx,
                                                         float* n) {
                        return DeviceGetNormalAt<voxel_t>(
                                xo, yo, zo, curr_block_idx, n,
                                static_cast<int>(resolution), voxel_size,
                                nb_block_masks_indexer,
                                nb_block_indices_indexer,
                                voxel_block_buffer_indexer);
                    };

                    // Natural index (0, N) -> (block_idx, voxel_idx)
                    int64_t workload_block_idx = workload_idx / resolution3;
                    int64_t block_idx = indices_ptr[workload_block_idx];
                    int64_t voxel_idx = workload_idx % resolution3;

                    /// Coordinate transform
                    // block_idx -> (x_block, y_block, z_block)
                    int* block_key_ptr =
                            block_keys_indexer.GetDataPtr<int>(block_idx);
                    int64_t xb = static_cast<int64_t>(block_key_ptr[0]);
                    int64_t yb = static_cast<int64_t>(block_key_ptr[1]);
                    int64_t zb = static_cast<int64_t>(block_key_ptr[2]);

                    // voxel_idx -> (x_voxel, y_voxel, z_voxel)
                    int64_t xv, yv, zv;
                    voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv);

                    voxel_t* voxel_ptr =
                            voxel_block_buffer_indexer.GetDataPtr<voxel_t>(
                                    xv, yv, zv, block_idx);
                    float tsdf_o = voxel_ptr->GetTSDF();
                    float weight_o = voxel_ptr->GetWeight();
                    if (weight_o <= weight_threshold) return;

                    // world coordinate of this voxel (in voxel units)
                    int64_t x = xb * resolution + xv;
                    int64_t y = yb * resolution + yv;
                    int64_t z = zb * resolution + zv;

                    float no[3] = {0}, ni[3] = {0};
                    if (extract_normal) {
                        GetNormalAt(static_cast<int>(xv),
                                    static_cast<int>(yv),
                                    static_cast<int>(zv),
                                    static_cast<int>(workload_block_idx), no);
                    }

                    // Enumerate x-y-z axis: on a TSDF sign change, linearly
                    // interpolate the zero crossing and emit a point.
                    for (int i = 0; i < 3; ++i) {
                        voxel_t* ptr = GetVoxelAt(
                                static_cast<int>(xv) + (i == 0),
                                static_cast<int>(yv) + (i == 1),
                                static_cast<int>(zv) + (i == 2),
                                static_cast<int>(workload_block_idx));
                        if (ptr == nullptr) continue;

                        float tsdf_i = ptr->GetTSDF();
                        float weight_i = ptr->GetWeight();

                        if (weight_i > weight_threshold &&
                            tsdf_i * tsdf_o < 0) {
                            // interpolation parameter of the zero crossing
                            float ratio = (0 - tsdf_o) / (tsdf_i - tsdf_o);

                            int idx = OPEN3D_ATOMIC_ADD(count_ptr, 1);
                            if (idx >= valid_size) {
                                printf("Point cloud size larger than "
                                       "estimated, please increase the "
                                       "estimation!\n");
                                return;
                            }

                            float* point_ptr =
                                    point_indexer.GetDataPtr<float>(idx);
                            point_ptr[0] =
                                    voxel_size * (x + ratio * int(i == 0));
                            point_ptr[1] =
                                    voxel_size * (y + ratio * int(i == 1));
                            point_ptr[2] =
                                    voxel_size * (z + ratio * int(i == 2));

                            if (extract_color) {
                                float* color_ptr =
                                        color_indexer.GetDataPtr<float>(idx);

                                // interpolate color between the two voxels
                                // and rescale from [0, 255] to [0, 1]
                                float r_o = voxel_ptr->GetR();
                                float g_o = voxel_ptr->GetG();
                                float b_o = voxel_ptr->GetB();

                                float r_i = ptr->GetR();
                                float g_i = ptr->GetG();
                                float b_i = ptr->GetB();

                                color_ptr[0] =
                                        ((1 - ratio) * r_o + ratio * r_i) /
                                        255.0f;
                                color_ptr[1] =
                                        ((1 - ratio) * g_o + ratio * g_i) /
                                        255.0f;
                                color_ptr[2] =
                                        ((1 - ratio) * b_o + ratio * b_i) /
                                        255.0f;
                            }

                            if (extract_normal) {
                                GetNormalAt(
                                        static_cast<int>(xv) + (i == 0),
                                        static_cast<int>(yv) + (i == 1),
                                        static_cast<int>(zv) + (i == 2),
                                        static_cast<int>(workload_block_idx),
                                        ni);

                                // interpolate and normalize the normal
                                // (continues on the following source line)
                                float* normal_ptr =
                                        normal_indexer.GetDataPtr<float>(idx);
                                float nx = (1 - ratio) * no[0] + ratio * ni[0];
                                float
ny = (1 - ratio) * no[1] + ratio * ni[1]; float nz = (1 - ratio) * no[2] + ratio * ni[2]; float norm = static_cast<float>( sqrt(nx * nx + ny * ny + nz * nz) + 1e-5); normal_ptr[0] = nx / norm; normal_ptr[1] = ny / norm; normal_ptr[2] = nz / norm; } } } }); }); #if defined(__CUDACC__) int total_count = count.Item<int>(); #else int total_count = (*count_ptr).load(); #endif utility::LogDebug("{} vertices extracted", total_count); valid_size = total_count; #if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__) OPEN3D_CUDA_CHECK(cudaDeviceSynchronize()); #endif } #if defined(__CUDACC__) void ExtractSurfaceMeshCUDA #else void ExtractSurfaceMeshCPU #endif (const core::Tensor& indices, const core::Tensor& inv_indices, const core::Tensor& nb_indices, const core::Tensor& nb_masks, const core::Tensor& block_keys, const core::Tensor& block_values, core::Tensor& vertices, core::Tensor& triangles, utility::optional<std::reference_wrapper<core::Tensor>> normals, utility::optional<std::reference_wrapper<core::Tensor>> colors, int64_t resolution, float voxel_size, float weight_threshold, int& vertex_count) { int64_t resolution3 = resolution * resolution * resolution; // Shape / transform indexers, no data involved NDArrayIndexer voxel_indexer({resolution, resolution, resolution}); int n_blocks = static_cast<int>(indices.GetLength()); #if defined(__CUDACC__) core::CUDACachedMemoryManager::ReleaseCache(); #endif // TODO(wei): profile performance by replacing the table to a hashmap. // Voxel-wise mesh info. 4 channels correspond to: // 3 edges' corresponding vertex index + 1 table index. core::Tensor mesh_structure; try { mesh_structure = core::Tensor::Zeros( {n_blocks, resolution, resolution, resolution, 4}, core::Dtype::Int32, block_keys.GetDevice()); } catch (const std::runtime_error&) { utility::LogError( "[MeshExtractionKernel] Unable to allocate assistance mesh " "structure for Marching " "Cubes with {} active voxel blocks. 
Please consider using a " "larger voxel size (currently {}) for TSDF " "integration, or using tsdf_volume.cpu() to perform mesh " "extraction on CPU.", n_blocks, voxel_size); } // Real data indexer NDArrayIndexer voxel_block_buffer_indexer(block_values, 4); NDArrayIndexer mesh_structure_indexer(mesh_structure, 4); NDArrayIndexer nb_block_masks_indexer(nb_masks, 2); NDArrayIndexer nb_block_indices_indexer(nb_indices, 2); // Plain arrays that does not require indexers const int64_t* indices_ptr = indices.GetDataPtr<int64_t>(); const int64_t* inv_indices_ptr = inv_indices.GetDataPtr<int64_t>(); int64_t n = n_blocks * resolution3; #if defined(__CUDACC__) namespace launcher = core::kernel::cuda_launcher; #else namespace launcher = core::kernel::cpu_launcher; #endif int64_t voxel_bytesize = voxel_block_buffer_indexer.ElementByteSize(); // Pass 0: analyze mesh structure, set up one-on-one correspondences // from edges to vertices. DISPATCH_BYTESIZE_TO_VOXEL(voxel_bytesize, [&]() { launcher::ParallelFor(n, [=] OPEN3D_DEVICE(int64_t widx) { auto GetVoxelAt = [&] OPEN3D_DEVICE( int xo, int yo, int zo, int curr_block_idx) -> voxel_t* { return DeviceGetVoxelAt<voxel_t>( xo, yo, zo, curr_block_idx, static_cast<int>(resolution), nb_block_masks_indexer, nb_block_indices_indexer, voxel_block_buffer_indexer); }; // Natural index (0, N) -> (block_idx, voxel_idx) int64_t workload_block_idx = widx / resolution3; int64_t voxel_idx = widx % resolution3; // voxel_idx -> (x_voxel, y_voxel, z_voxel) int64_t xv, yv, zv; voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv); // Check per-vertex sign in the cube to determine cube // type int table_idx = 0; for (int i = 0; i < 8; ++i) { voxel_t* voxel_ptr_i = GetVoxelAt(static_cast<int>(xv) + vtx_shifts[i][0], static_cast<int>(yv) + vtx_shifts[i][1], static_cast<int>(zv) + vtx_shifts[i][2], static_cast<int>(workload_block_idx)); if (voxel_ptr_i == nullptr) return; float tsdf_i = voxel_ptr_i->GetTSDF(); float weight_i = 
voxel_ptr_i->GetWeight(); if (weight_i <= weight_threshold) return; table_idx |= ((tsdf_i < 0) ? (1 << i) : 0); } int* mesh_struct_ptr = mesh_structure_indexer.GetDataPtr<int>( xv, yv, zv, workload_block_idx); mesh_struct_ptr[3] = table_idx; if (table_idx == 0 || table_idx == 255) return; // Check per-edge sign determine the cube type int edges_with_vertices = edge_table[table_idx]; for (int i = 0; i < 12; ++i) { if (edges_with_vertices & (1 << i)) { int64_t xv_i = xv + edge_shifts[i][0]; int64_t yv_i = yv + edge_shifts[i][1]; int64_t zv_i = zv + edge_shifts[i][2]; int edge_i = edge_shifts[i][3]; int dxb = static_cast<int>(xv_i / resolution); int dyb = static_cast<int>(yv_i / resolution); int dzb = static_cast<int>(zv_i / resolution); int nb_idx = (dxb + 1) + (dyb + 1) * 3 + (dzb + 1) * 9; int64_t block_idx_i = *nb_block_indices_indexer.GetDataPtr<int64_t>( workload_block_idx, nb_idx); int* mesh_ptr_i = mesh_structure_indexer.GetDataPtr<int>( xv_i - dxb * resolution, yv_i - dyb * resolution, zv_i - dzb * resolution, inv_indices_ptr[block_idx_i]); // Non-atomic write, but we are safe mesh_ptr_i[edge_i] = -1; } } }); }); // Pass 1: determine valid number of vertices (if not preset) #if defined(__CUDACC__) core::Tensor count(std::vector<int>{0}, {}, core::Dtype::Int32, block_values.GetDevice()); int* count_ptr = count.GetDataPtr<int>(); #else std::atomic<int> count_atomic(0); std::atomic<int>* count_ptr = &count_atomic; #endif if (vertex_count < 0) { launcher::ParallelFor(n, [=] OPEN3D_DEVICE(int64_t widx) { // Natural index (0, N) -> (block_idx, voxel_idx) int64_t workload_block_idx = widx / resolution3; int64_t voxel_idx = widx % resolution3; // voxel_idx -> (x_voxel, y_voxel, z_voxel) int64_t xv, yv, zv; voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv); // Obtain voxel's mesh struct ptr int* mesh_struct_ptr = mesh_structure_indexer.GetDataPtr<int>( xv, yv, zv, workload_block_idx); // Early quit -- no allocated vertex to compute if (mesh_struct_ptr[0] != -1 
&& mesh_struct_ptr[1] != -1 &&
                mesh_struct_ptr[2] != -1) {
                return;
            }

            // Enumerate 3 edges in the voxel: each edge slot that Pass 0
            // marked with -1 needs one (shared) mesh vertex, so it bumps
            // the global vertex counter once.
            for (int e = 0; e < 3; ++e) {
                int vertex_idx = mesh_struct_ptr[e];
                if (vertex_idx != -1) continue;
                OPEN3D_ATOMIC_ADD(count_ptr, 1);
            }
        });

#if defined(__CUDACC__)
        vertex_count = count.Item<int>();
#else
        vertex_count = (*count_ptr).load();
#endif
    }

    utility::LogDebug("Total vertex count = {}", vertex_count);
    // Allocate the vertex buffer now that the exact count is known
    // (or was preset by the caller when vertex_count >= 0 on entry).
    vertices = core::Tensor({vertex_count, 3}, core::Dtype::Float32,
                            block_values.GetDevice());

    // Optional per-vertex normals: only allocated when the caller passed a
    // tensor reference to fill.
    bool extract_normal = false;
    NDArrayIndexer normal_indexer;
    if (normals.has_value()) {
        extract_normal = true;
        normals.value().get() = core::Tensor({vertex_count, 3},
                                             core::Dtype::Float32,
                                             block_values.GetDevice());
        normal_indexer = NDArrayIndexer(normals.value().get(), 1);
    }

    NDArrayIndexer block_keys_indexer(block_keys, 1);
    NDArrayIndexer vertex_indexer(vertices, 1);

    // Reset the atomic counter; it is reused as the running vertex index in
    // Pass 2.
#if defined(__CUDACC__)
    count = core::Tensor(std::vector<int>{0}, {}, core::Dtype::Int32,
                         block_values.GetDevice());
    count_ptr = count.GetDataPtr<int>();
#else
    (*count_ptr) = 0;
#endif

    // Pass 2: extract vertices.
DISPATCH_BYTESIZE_TO_VOXEL(voxel_bytesize, [&]() { bool extract_color = false; NDArrayIndexer color_indexer; if (voxel_t::HasColor() && colors.has_value()) { extract_color = true; colors.value().get() = core::Tensor({vertex_count, 3}, core::Dtype::Float32, block_values.GetDevice()); color_indexer = NDArrayIndexer(colors.value().get(), 1); } launcher::ParallelFor(n, [=] OPEN3D_DEVICE(int64_t widx) { auto GetVoxelAt = [&] OPEN3D_DEVICE( int xo, int yo, int zo, int curr_block_idx) -> voxel_t* { return DeviceGetVoxelAt<voxel_t>( xo, yo, zo, curr_block_idx, static_cast<int>(resolution), nb_block_masks_indexer, nb_block_indices_indexer, voxel_block_buffer_indexer); }; auto GetNormalAt = [&] OPEN3D_DEVICE(int xo, int yo, int zo, int curr_block_idx, float* n) { return DeviceGetNormalAt<voxel_t>( xo, yo, zo, curr_block_idx, n, static_cast<int>(resolution), voxel_size, nb_block_masks_indexer, nb_block_indices_indexer, voxel_block_buffer_indexer); }; // Natural index (0, N) -> (block_idx, voxel_idx) int64_t workload_block_idx = widx / resolution3; int64_t block_idx = indices_ptr[workload_block_idx]; int64_t voxel_idx = widx % resolution3; // block_idx -> (x_block, y_block, z_block) int* block_key_ptr = block_keys_indexer.GetDataPtr<int>(block_idx); int64_t xb = static_cast<int64_t>(block_key_ptr[0]); int64_t yb = static_cast<int64_t>(block_key_ptr[1]); int64_t zb = static_cast<int64_t>(block_key_ptr[2]); // voxel_idx -> (x_voxel, y_voxel, z_voxel) int64_t xv, yv, zv; voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv); // global coordinate (in voxels) int64_t x = xb * resolution + xv; int64_t y = yb * resolution + yv; int64_t z = zb * resolution + zv; // Obtain voxel's mesh struct ptr int* mesh_struct_ptr = mesh_structure_indexer.GetDataPtr<int>( xv, yv, zv, workload_block_idx); // Early quit -- no allocated vertex to compute if (mesh_struct_ptr[0] != -1 && mesh_struct_ptr[1] != -1 && mesh_struct_ptr[2] != -1) { return; } // Obtain voxel ptr voxel_t* voxel_ptr = 
voxel_block_buffer_indexer.GetDataPtr<voxel_t>( xv, yv, zv, block_idx); float tsdf_o = voxel_ptr->GetTSDF(); float no[3] = {0}, ne[3] = {0}; if (extract_normal) { GetNormalAt(static_cast<int>(xv), static_cast<int>(yv), static_cast<int>(zv), static_cast<int>(workload_block_idx), no); } // Enumerate 3 edges in the voxel for (int e = 0; e < 3; ++e) { int vertex_idx = mesh_struct_ptr[e]; if (vertex_idx != -1) continue; voxel_t* voxel_ptr_e = GetVoxelAt(static_cast<int>(xv) + (e == 0), static_cast<int>(yv) + (e == 1), static_cast<int>(zv) + (e == 2), static_cast<int>(workload_block_idx)); OPEN3D_ASSERT(voxel_ptr_e != nullptr && "Internal error: GetVoxelAt returns nullptr."); float tsdf_e = voxel_ptr_e->GetTSDF(); float ratio = (0 - tsdf_o) / (tsdf_e - tsdf_o); int idx = OPEN3D_ATOMIC_ADD(count_ptr, 1); mesh_struct_ptr[e] = idx; float ratio_x = ratio * int(e == 0); float ratio_y = ratio * int(e == 1); float ratio_z = ratio * int(e == 2); float* vertex_ptr = vertex_indexer.GetDataPtr<float>(idx); vertex_ptr[0] = voxel_size * (x + ratio_x); vertex_ptr[1] = voxel_size * (y + ratio_y); vertex_ptr[2] = voxel_size * (z + ratio_z); if (extract_normal) { float* normal_ptr = normal_indexer.GetDataPtr<float>(idx); GetNormalAt(static_cast<int>(xv) + (e == 0), static_cast<int>(yv) + (e == 1), static_cast<int>(zv) + (e == 2), static_cast<int>(workload_block_idx), ne); float nx = (1 - ratio) * no[0] + ratio * ne[0]; float ny = (1 - ratio) * no[1] + ratio * ne[1]; float nz = (1 - ratio) * no[2] + ratio * ne[2]; float norm = static_cast<float>( sqrt(nx * nx + ny * ny + nz * nz) + 1e-5); normal_ptr[0] = nx / norm; normal_ptr[1] = ny / norm; normal_ptr[2] = nz / norm; } if (extract_color) { float* color_ptr = color_indexer.GetDataPtr<float>(idx); float r_o = voxel_ptr->GetR(); float g_o = voxel_ptr->GetG(); float b_o = voxel_ptr->GetB(); float r_e = voxel_ptr_e->GetR(); float g_e = voxel_ptr_e->GetG(); float b_e = voxel_ptr_e->GetB(); color_ptr[0] = ((1 - ratio) * r_o + ratio * r_e) / 
255.0f; color_ptr[1] = ((1 - ratio) * g_o + ratio * g_e) / 255.0f; color_ptr[2] = ((1 - ratio) * b_o + ratio * b_e) / 255.0f; } } }); }); // Pass 3: connect vertices and form triangles. int triangle_count = vertex_count * 3; triangles = core::Tensor({triangle_count, 3}, core::Dtype::Int64, block_values.GetDevice()); NDArrayIndexer triangle_indexer(triangles, 1); #if defined(__CUDACC__) count = core::Tensor(std::vector<int>{0}, {}, core::Dtype::Int32, block_values.GetDevice()); count_ptr = count.GetDataPtr<int>(); #else (*count_ptr) = 0; #endif launcher::ParallelFor(n, [=] OPEN3D_DEVICE(int64_t widx) { // Natural index (0, N) -> (block_idx, voxel_idx) int64_t workload_block_idx = widx / resolution3; int64_t voxel_idx = widx % resolution3; // voxel_idx -> (x_voxel, y_voxel, z_voxel) int64_t xv, yv, zv; voxel_indexer.WorkloadToCoord(voxel_idx, &xv, &yv, &zv); // Obtain voxel's mesh struct ptr int* mesh_struct_ptr = mesh_structure_indexer.GetDataPtr<int>( xv, yv, zv, workload_block_idx); int table_idx = mesh_struct_ptr[3]; if (tri_count[table_idx] == 0) return; for (size_t tri = 0; tri < 16; tri += 3) { if (tri_table[table_idx][tri] == -1) return; int tri_idx = OPEN3D_ATOMIC_ADD(count_ptr, 1); for (size_t vertex = 0; vertex < 3; ++vertex) { int edge = tri_table[table_idx][tri + vertex]; int64_t xv_i = xv + edge_shifts[edge][0]; int64_t yv_i = yv + edge_shifts[edge][1]; int64_t zv_i = zv + edge_shifts[edge][2]; int64_t edge_i = edge_shifts[edge][3]; int dxb = static_cast<int>(xv_i / resolution); int dyb = static_cast<int>(yv_i / resolution); int dzb = static_cast<int>(zv_i / resolution); int nb_idx = (dxb + 1) + (dyb + 1) * 3 + (dzb + 1) * 9; int64_t block_idx_i = *nb_block_indices_indexer.GetDataPtr<int64_t>( workload_block_idx, nb_idx); int* mesh_struct_ptr_i = mesh_structure_indexer.GetDataPtr<int>( xv_i - dxb * resolution, yv_i - dyb * resolution, zv_i - dzb * resolution, inv_indices_ptr[block_idx_i]); int64_t* triangle_ptr = 
triangle_indexer.GetDataPtr<int64_t>(tri_idx); triangle_ptr[2 - vertex] = mesh_struct_ptr_i[edge_i]; } } }); #if defined(__CUDACC__) triangle_count = count.Item<int>(); #else triangle_count = (*count_ptr).load(); #endif utility::LogInfo("Total triangle count = {}", triangle_count); triangles = triangles.Slice(0, 0, triangle_count); } #if defined(__CUDACC__) void EstimateRangeCUDA #else void EstimateRangeCPU #endif (const core::Tensor& block_keys, core::Tensor& range_minmax_map, const core::Tensor& intrinsics, const core::Tensor& extrinsics, int h, int w, int down_factor, int64_t block_resolution, float voxel_size, float depth_min, float depth_max) { // TODO(wei): reserve it in a reusable buffer // Every 2 channels: (min, max) int h_down = h / down_factor; int w_down = w / down_factor; range_minmax_map = core::Tensor({h_down, w_down, 2}, core::Dtype::Float32, block_keys.GetDevice()); NDArrayIndexer range_map_indexer(range_minmax_map, 2); // Every 6 channels: (v_min, u_min, v_max, u_max, z_min, z_max) const int fragment_size = 16; const int frag_buffer_size = 65535; // TODO(wei): explicit buffer core::Tensor fragment_buffer = core::Tensor({frag_buffer_size, 6}, core::Dtype::Float32, block_keys.GetDevice()); NDArrayIndexer frag_buffer_indexer(fragment_buffer, 1); NDArrayIndexer block_keys_indexer(block_keys, 1); TransformIndexer w2c_transform_indexer(intrinsics, extrinsics); #if defined(__CUDACC__) core::Tensor count(std::vector<int>{0}, {1}, core::Dtype::Int32, block_keys.GetDevice()); int* count_ptr = count.GetDataPtr<int>(); #else std::atomic<int> count_atomic(0); std::atomic<int>* count_ptr = &count_atomic; #endif #if defined(__CUDACC__) namespace launcher = core::kernel::cuda_launcher; #else namespace launcher = core::kernel::cpu_launcher; using std::max; using std::min; #endif // Pass 0: iterate over blocks, fill-in an rendering fragment array launcher::ParallelFor( block_keys.GetLength(), [=] OPEN3D_DEVICE(int64_t workload_idx) { int* key = 
block_keys_indexer.GetDataPtr<int>(workload_idx);

                // Screen-space bounding rectangle of this voxel block,
                // initialized to an empty (inverted) range.
                int u_min = w_down - 1, v_min = h_down - 1, u_max = 0,
                    v_max = 0;
                float z_min = depth_max, z_max = depth_min;

                float xc, yc, zc, u, v;

                // Project 8 corners to low-res image and form a rectangle.
                // Corner i selects min/max per axis via bits 0..2 of i.
                for (int i = 0; i < 8; ++i) {
                    float xw = (key[0] + ((i & 1) > 0)) * block_resolution *
                               voxel_size;
                    float yw = (key[1] + ((i & 2) > 0)) * block_resolution *
                               voxel_size;
                    float zw = (key[2] + ((i & 4) > 0)) * block_resolution *
                               voxel_size;

                    w2c_transform_indexer.RigidTransform(xw, yw, zw, &xc, &yc,
                                                         &zc);
                    // Corner behind the camera: cannot be projected.
                    if (zc <= 0) continue;

                    // Project to the down sampled image buffer
                    w2c_transform_indexer.Project(xc, yc, zc, &u, &v);
                    u /= down_factor;
                    v /= down_factor;
                    v_min = min(static_cast<int>(floorf(v)), v_min);
                    v_max = max(static_cast<int>(ceilf(v)), v_max);
                    u_min = min(static_cast<int>(floorf(u)), u_min);
                    u_max = max(static_cast<int>(ceilf(u)), u_max);
                    z_min = min(z_min, zc);
                    z_max = max(z_max, zc);
                }

                // Clip the rectangle to the down-sampled image bounds.
                v_min = max(0, v_min);
                v_max = min(h_down - 1, v_max);
                u_min = max(0, u_min);
                u_max = min(w_down - 1, u_max);

                // Degenerate or fully-clipped rectangle: nothing to emit.
                if (v_min >= v_max || u_min >= u_max || z_min >= z_max) return;

                // Divide the rectangle into small 16x16 fragments
                int frag_v_count = ceilf(float(v_max - v_min + 1) /
                                         float(fragment_size));
                int frag_u_count = ceilf(float(u_max - u_min + 1) /
                                         float(fragment_size));
                int frag_count = frag_v_count * frag_u_count;

                // NOTE(review): this reserves only ONE slot in the fragment
                // buffer, yet the loop below writes `frag_count` entries
                // starting at `frag_count_start`. Fragments emitted by
                // different blocks can therefore overwrite each other, and
                // the final fragment count read back after the launcher
                // undercounts. Presumably this should be
                // OPEN3D_ATOMIC_ADD(count_ptr, frag_count) — confirm against
                // upstream before changing.
                int frag_count_start = OPEN3D_ATOMIC_ADD(count_ptr, 1);
                int frag_count_end = frag_count_start + frag_count;
                // NOTE(review): overflow is reported but not prevented — the
                // writes below still proceed past `frag_buffer_size`, which
                // is out-of-bounds on the fragment buffer. A clamp/early
                // return is likely intended here.
                if (frag_count_end >= frag_buffer_size) {
                    printf("Fragment count exceeding buffer size, abort!\n");
                }

                int offset = 0;
                for (int frag_v = 0; frag_v < frag_v_count; ++frag_v) {
                    for (int frag_u = 0; frag_u < frag_u_count;
                         ++frag_u, ++offset) {
                        float* frag_ptr = frag_buffer_indexer.GetDataPtr<float>(
                                frag_count_start + offset);
                        // zmin, zmax
                        frag_ptr[0] = z_min;
                        frag_ptr[1] = z_max;
                        // vmin, umin
                        frag_ptr[2] = v_min + frag_v * fragment_size;
                        frag_ptr[3] = u_min + frag_u * fragment_size;
                        // vmax, umax (clipped to the rectangle)
                        frag_ptr[4] =
min(frag_ptr[2] + fragment_size - 1,
                            static_cast<float>(v_max));
                        frag_ptr[5] = min(frag_ptr[3] + fragment_size - 1,
                                          static_cast<float>(u_max));
                    }
                }
            });

#if defined(__CUDACC__)
    int frag_count = count[0].Item<int>();
#else
    int frag_count = (*count_ptr).load();
#endif

    // Pass 0.5: Fill in range map to prepare for atomic min/max.
    // Seeding with (depth_max, depth_min) makes every real observation in
    // Pass 1 win the min/max comparison.
    launcher::ParallelFor(
            h_down * w_down, [=] OPEN3D_DEVICE(int64_t workload_idx) {
                int v = workload_idx / w_down;
                int u = workload_idx % w_down;
                float* range_ptr = range_map_indexer.GetDataPtr<float>(u, v);
                range_ptr[0] = depth_max;
                range_ptr[1] = depth_min;
            });

    // Pass 1: iterate over rendering fragment array, fill-in range.
    // One work item per pixel of each 16x16 fragment; pixels outside the
    // fragment's clipped rectangle are skipped.
    launcher::ParallelFor(
            frag_count * fragment_size * fragment_size,
            [=] OPEN3D_DEVICE(int64_t workload_idx) {
                int frag_idx = workload_idx / (fragment_size * fragment_size);
                int local_idx = workload_idx % (fragment_size * fragment_size);
                int dv = local_idx / fragment_size;
                int du = local_idx % fragment_size;

                float* frag_ptr =
                        frag_buffer_indexer.GetDataPtr<float>(frag_idx);
                int v_min = static_cast<int>(frag_ptr[2]);
                int u_min = static_cast<int>(frag_ptr[3]);
                int v_max = static_cast<int>(frag_ptr[4]);
                int u_max = static_cast<int>(frag_ptr[5]);

                int v = v_min + dv;
                int u = u_min + du;
                if (v > v_max || u > u_max) return;

                float z_min = frag_ptr[0];
                float z_max = frag_ptr[1];
                float* range_ptr = range_map_indexer.GetDataPtr<float>(u, v);
#ifdef __CUDACC__
                // CUDA path: lock-free float min/max on the shared pixel.
                atomicMinf(&(range_ptr[0]), z_min);
                atomicMaxf(&(range_ptr[1]), z_max);
#else
// NOTE(review): a single named critical section serializes ALL pixel
// updates across threads, not just updates to the same pixel — correct,
// but potentially a bottleneck; confirm acceptable before changing.
#pragma omp critical(EstimateRangeCPU)
                {
                    range_ptr[0] = min(z_min, range_ptr[0]);
                    range_ptr[1] = max(z_max, range_ptr[1]);
                }
#endif
            });
#if defined(__CUDACC__)
    OPEN3D_CUDA_CHECK(cudaDeviceSynchronize());
#endif
}

/// One-entry cache of the most recent (block coordinate -> buffer address)
/// hashmap lookup, used by the raycaster to avoid repeated hash lookups for
/// consecutive samples that fall in the same voxel block. Each ray/thread
/// owns its own instance (see the per-workload declaration in RayCast).
struct BlockCache {
    int x;
    int y;
    int z;
    int block_idx;

    /// Returns the cached block address if (xin, yin, zin) matches the
    /// cached key, otherwise -1 (meaning: caller must do a real lookup).
    inline int OPEN3D_DEVICE Check(int xin, int yin, int zin) {
        return (xin == x && yin == y && zin == z) ?
block_idx
                       : -1;
    }

    /// Replaces the cached entry with the given key and block address.
    inline void OPEN3D_DEVICE Update(int xin,
                                     int yin,
                                     int zin,
                                     int block_idx_in) {
        x = xin;
        y = yin;
        z = zin;
        block_idx = block_idx_in;
    }
};

#if defined(__CUDACC__)
void RayCastCUDA
#else
void RayCastCPU
#endif
        (std::shared_ptr<core::DeviceHashmap>& hashmap,
         const core::Tensor& block_values,
         const core::Tensor& range_map,
         core::Tensor& vertex_map,
         core::Tensor& depth_map,
         core::Tensor& color_map,
         core::Tensor& normal_map,
         const core::Tensor& intrinsics,
         const core::Tensor& extrinsics,
         int h,
         int w,
         int64_t block_resolution,
         float voxel_size,
         float sdf_trunc,
         float depth_scale,
         float depth_min,
         float depth_max,
         float weight_threshold) {
    using Key = core::Block<int, 3>;
    using Hash = core::BlockHash<int, 3>;

    // Obtain the raw device-side hashmap implementation; the backend type
    // depends on the compilation mode.
#if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__)
    auto cuda_hashmap =
            std::dynamic_pointer_cast<core::StdGPUHashmap<Key, Hash>>(hashmap);
    if (cuda_hashmap == nullptr) {
        utility::LogError(
                "Unsupported backend: CUDA raycasting only supports STDGPU.");
    }
    auto hashmap_impl = cuda_hashmap->GetImpl();
#else
    auto cpu_hashmap =
            std::dynamic_pointer_cast<core::TBBHashmap<Key, Hash>>(hashmap);
    auto hashmap_impl = *cpu_hashmap->GetImpl();
#endif

    NDArrayIndexer voxel_block_buffer_indexer(block_values, 4);
    NDArrayIndexer range_map_indexer(range_map, 2);

    // Output maps are optional: a zero-length tensor means "do not produce
    // this output".
    NDArrayIndexer vertex_map_indexer;
    NDArrayIndexer depth_map_indexer;
    NDArrayIndexer color_map_indexer;
    NDArrayIndexer normal_map_indexer;

    bool enable_vertex = (vertex_map.GetLength() != 0);
    bool enable_depth = (depth_map.GetLength() != 0);
    bool enable_color = (color_map.GetLength() != 0);
    bool enable_normal = (normal_map.GetLength() != 0);

    if (!enable_vertex && !enable_depth && !enable_color && !enable_normal) {
        utility::LogWarning("No output specified for ray casting, exit.");
        return;
    }

    if (enable_vertex) {
        vertex_map_indexer = NDArrayIndexer(vertex_map, 2);
    }
    if (enable_depth) {
        depth_map_indexer = NDArrayIndexer(depth_map, 2);
    }
    if (enable_color) {
        color_map_indexer = NDArrayIndexer(color_map, 2);
    }
    if
(enable_normal) { normal_map_indexer = NDArrayIndexer(normal_map, 2); } TransformIndexer c2w_transform_indexer( intrinsics, t::geometry::InverseTransformation(extrinsics)); TransformIndexer w2c_transform_indexer(intrinsics, extrinsics); int64_t rows = h; int64_t cols = w; float block_size = voxel_size * block_resolution; #if defined(__CUDACC__) namespace launcher = core::kernel::cuda_launcher; #else namespace launcher = core::kernel::cpu_launcher; using std::max; #endif DISPATCH_BYTESIZE_TO_VOXEL( voxel_block_buffer_indexer.ElementByteSize(), [&]() { launcher::ParallelFor(rows * cols, [=] OPEN3D_DEVICE( int64_t workload_idx) { auto GetVoxelAtP = [&] OPEN3D_DEVICE( int x_b, int y_b, int z_b, int x_v, int y_v, int z_v, core::addr_t block_addr, BlockCache& cache) -> voxel_t* { int x_vn = (x_v + block_resolution) % block_resolution; int y_vn = (y_v + block_resolution) % block_resolution; int z_vn = (z_v + block_resolution) % block_resolution; int dx_b = Sign(x_v - x_vn); int dy_b = Sign(y_v - y_vn); int dz_b = Sign(z_v - z_vn); if (dx_b == 0 && dy_b == 0 && dz_b == 0) { return voxel_block_buffer_indexer .GetDataPtr<voxel_t>(x_v, y_v, z_v, block_addr); } else { Key key; key.Set(0, x_b + dx_b); key.Set(1, y_b + dy_b); key.Set(2, z_b + dz_b); int block_addr = cache.Check(key.Get(0), key.Get(1), key.Get(2)); if (block_addr < 0) { auto iter = hashmap_impl.find(key); if (iter == hashmap_impl.end()) return nullptr; block_addr = iter->second; cache.Update(key.Get(0), key.Get(1), key.Get(2), block_addr); } return voxel_block_buffer_indexer .GetDataPtr<voxel_t>(x_vn, y_vn, z_vn, block_addr); } }; auto GetVoxelAtT = [&] OPEN3D_DEVICE( float x_o, float y_o, float z_o, float x_d, float y_d, float z_d, float t, BlockCache& cache) -> voxel_t* { float x_g = x_o + t * x_d; float y_g = y_o + t * y_d; float z_g = z_o + t * z_d; // Block coordinate and look up int x_b = static_cast<int>(floorf(x_g / block_size)); int y_b = static_cast<int>(floorf(y_g / block_size)); int z_b = 
static_cast<int>(floorf(z_g / block_size)); Key key; key.Set(0, x_b); key.Set(1, y_b); key.Set(2, z_b); int block_addr = cache.Check(x_b, y_b, z_b); if (block_addr < 0) { auto iter = hashmap_impl.find(key); if (iter == hashmap_impl.end()) return nullptr; block_addr = iter->second; cache.Update(x_b, y_b, z_b, block_addr); } // Voxel coordinate and look up int x_v = int((x_g - x_b * block_size) / voxel_size); int y_v = int((y_g - y_b * block_size) / voxel_size); int z_v = int((z_g - z_b * block_size) / voxel_size); return voxel_block_buffer_indexer.GetDataPtr<voxel_t>( x_v, y_v, z_v, block_addr); }; int64_t y = workload_idx / cols; int64_t x = workload_idx % cols; float *depth_ptr = nullptr, *vertex_ptr = nullptr, *normal_ptr = nullptr, *color_ptr = nullptr; if (enable_depth) { depth_ptr = depth_map_indexer.GetDataPtr<float>(x, y); *depth_ptr = 0; } if (enable_vertex) { vertex_ptr = vertex_map_indexer.GetDataPtr<float>(x, y); vertex_ptr[0] = 0; vertex_ptr[1] = 0; vertex_ptr[2] = 0; } if (enable_color) { color_ptr = color_map_indexer.GetDataPtr<float>(x, y); color_ptr[0] = 0; color_ptr[1] = 0; color_ptr[2] = 0; } if (enable_normal) { normal_ptr = normal_map_indexer.GetDataPtr<float>(x, y); normal_ptr[0] = 0; normal_ptr[1] = 0; normal_ptr[2] = 0; } const float* range = range_map_indexer.GetDataPtr<float>(x / 8, y / 8); float t = range[0]; const float t_max = range[1]; if (t >= t_max) return; // Coordinates in camera and global float x_c = 0, y_c = 0, z_c = 0; float x_g = 0, y_g = 0, z_g = 0; float x_o = 0, y_o = 0, z_o = 0; // Iterative ray intersection check float t_prev = t; float tsdf_prev = -1.0f; float tsdf = 1.0; float w = 0.0; // Camera origin c2w_transform_indexer.RigidTransform(0, 0, 0, &x_o, &y_o, &z_o); // Direction c2w_transform_indexer.Unproject(static_cast<float>(x), static_cast<float>(y), 1.0f, &x_c, &y_c, &z_c); c2w_transform_indexer.RigidTransform(x_c, y_c, z_c, &x_g, &y_g, &z_g); float x_d = (x_g - x_o); float y_d = (y_g - y_o); float z_d = (z_g - 
z_o); BlockCache cache{0, 0, 0, -1}; bool surface_found = false; while (t < t_max) { voxel_t* voxel_ptr = GetVoxelAtT(x_o, y_o, z_o, x_d, y_d, z_d, t, cache); if (!voxel_ptr) { t_prev = t; t += block_size; } else { tsdf_prev = tsdf; tsdf = voxel_ptr->GetTSDF(); w = voxel_ptr->GetWeight(); if (tsdf_prev > 0 && w >= weight_threshold && tsdf <= 0) { surface_found = true; break; } t_prev = t; float delta = tsdf * sdf_trunc; t += delta < voxel_size ? voxel_size : delta; } } if (surface_found) { float t_intersect = (t * tsdf_prev - t_prev * tsdf) / (tsdf_prev - tsdf); x_g = x_o + t_intersect * x_d; y_g = y_o + t_intersect * y_d; z_g = z_o + t_intersect * z_d; // Trivial vertex assignment if (enable_depth) { *depth_ptr = t_intersect * depth_scale; } if (enable_vertex) { w2c_transform_indexer.RigidTransform( x_g, y_g, z_g, vertex_ptr + 0, vertex_ptr + 1, vertex_ptr + 2); } // Trilinear interpolation // TODO(wei): simplify the flow by splitting the // functions given what is enabled if (enable_color || enable_normal) { int x_b = static_cast<int>(floorf(x_g / block_size)); int y_b = static_cast<int>(floorf(y_g / block_size)); int z_b = static_cast<int>(floorf(z_g / block_size)); float x_v = (x_g - float(x_b) * block_size) / voxel_size; float y_v = (y_g - float(y_b) * block_size) / voxel_size; float z_v = (z_g - float(z_b) * block_size) / voxel_size; Key key; key.Set(0, x_b); key.Set(1, y_b); key.Set(2, z_b); int block_addr = cache.Check(x_b, y_b, z_b); if (block_addr < 0) { auto iter = hashmap_impl.find(key); if (iter == hashmap_impl.end()) return; block_addr = iter->second; cache.Update(x_b, y_b, z_b, block_addr); } int x_v_floor = static_cast<int>(floorf(x_v)); int y_v_floor = static_cast<int>(floorf(y_v)); int z_v_floor = static_cast<int>(floorf(z_v)); float ratio_x = x_v - float(x_v_floor); float ratio_y = y_v - float(y_v_floor); float ratio_z = z_v - float(z_v_floor); float sum_weight_color = 0.0; float sum_weight_normal = 0.0; for (int k = 0; k < 8; ++k) { int dx_v = 
(k & 1) > 0 ? 1 : 0; int dy_v = (k & 2) > 0 ? 1 : 0; int dz_v = (k & 4) > 0 ? 1 : 0; float ratio = (dx_v * (ratio_x) + (1 - dx_v) * (1 - ratio_x)) * (dy_v * (ratio_y) + (1 - dy_v) * (1 - ratio_y)) * (dz_v * (ratio_z) + (1 - dz_v) * (1 - ratio_z)); voxel_t* voxel_ptr_k = GetVoxelAtP( x_b, y_b, z_b, x_v_floor + dx_v, y_v_floor + dy_v, z_v_floor + dz_v, block_addr, cache); if (enable_color && voxel_ptr_k && voxel_ptr_k->GetWeight() > 0) { sum_weight_color += ratio; color_ptr[0] += ratio * voxel_ptr_k->GetR(); color_ptr[1] += ratio * voxel_ptr_k->GetG(); color_ptr[2] += ratio * voxel_ptr_k->GetB(); } if (enable_normal) { for (int dim = 0; dim < 3; ++dim) { voxel_t* voxel_ptr_k_plus = GetVoxelAtP( x_b, y_b, z_b, x_v_floor + dx_v + (dim == 0), y_v_floor + dy_v + (dim == 1), z_v_floor + dz_v + (dim == 2), block_addr, cache); voxel_t* voxel_ptr_k_minus = GetVoxelAtP(x_b, y_b, z_b, x_v_floor + dx_v - (dim == 0), y_v_floor + dy_v - (dim == 1), z_v_floor + dz_v - (dim == 2), block_addr, cache); bool valid = false; if (voxel_ptr_k_plus && voxel_ptr_k_plus->GetWeight() > 0) { normal_ptr[dim] += ratio * voxel_ptr_k_plus ->GetTSDF() / (2 * voxel_size); valid = true; } if (voxel_ptr_k_minus && voxel_ptr_k_minus->GetWeight() > 0) { normal_ptr[dim] -= ratio * voxel_ptr_k_minus ->GetTSDF() / (2 * voxel_size); valid = true; } sum_weight_normal += valid ? 
ratio : 0; } } // if (enable_normal) } // loop over 8 neighbors if (enable_color && sum_weight_color > 0) { sum_weight_color *= 255.0; color_ptr[0] /= sum_weight_color; color_ptr[1] /= sum_weight_color; color_ptr[2] /= sum_weight_color; } if (enable_normal && sum_weight_normal > 0) { normal_ptr[0] /= sum_weight_normal; normal_ptr[1] /= sum_weight_normal; normal_ptr[2] /= sum_weight_normal; float norm = sqrt(normal_ptr[0] * normal_ptr[0] + normal_ptr[1] * normal_ptr[1] + normal_ptr[2] * normal_ptr[2]); w2c_transform_indexer.Rotate( normal_ptr[0] / norm, normal_ptr[1] / norm, normal_ptr[2] / norm, normal_ptr + 0, normal_ptr + 1, normal_ptr + 2); } } // if (color or normal) } // if (tsdf < 0) }); }); #if defined(__CUDACC__) OPEN3D_CUDA_CHECK(cudaDeviceSynchronize()); #endif } } // namespace tsdf } // namespace kernel } // namespace geometry } // namespace t } // namespace open3d
// ==== Begin concatenated file: CSRMatrix.h ====
/* * CSRMatrix.h * * Created on: May 6, 2015 * Author: Michael Wegner (michael.wegner@student.kit.edu) */ #ifndef CSRMATRIX_H_ #define CSRMATRIX_H_ #include <vector> #include "../Globals.h" #include "Vector.h" #include "../graph/Graph.h" #include "../algebraic/SparseAccumulator.h" #include "../auxiliary/Timer.h" namespace NetworKit { /** * @ingroup algebraic * The CSRMatrix class represents a sparse matrix stored in CSR-Format (i.e. compressed sparse row). * If speed is important, use this CSRMatrix instead of the Matrix class. */ class CSRMatrix { private: std::vector<index> rowIdx; std::vector<index> columnIdx; std::vector<double> nonZeros; count nRows; count nCols; bool isSorted; void quicksort(index left, index right); index partition(index left, index right); public: /** Represents a matrix entry s.t. matrix(row, column) = value */ struct Triple { index row; index column; double value; }; /** Default constructor */ CSRMatrix(); CSRMatrix(const count nRows, const count nCols, const std::vector<std::pair<index, index>> &positions, const std::vector<double> &values, bool isSorted = false); CSRMatrix(const count nRows, const count nCols, const std::vector<Triple> &triples, bool isSorted = false); CSRMatrix(const count nRows, const count nCols, const std::vector<std::vector<index>> &columnIdx, const std::vector<std::vector<double>> &values, bool isSorted = false); CSRMatrix(const count nRows, const count nCols, const std::vector<index> &rowIdx, const std::vector<index> &columnIdx, const std::vector<double> &nonZeros, bool isSorted = false); CSRMatrix (const CSRMatrix &other) = default; CSRMatrix (CSRMatrix &&other) = default; virtual ~CSRMatrix() = default; CSRMatrix& operator=(CSRMatrix &&other) = default; CSRMatrix& operator=(const CSRMatrix &other) = default; /** * @return Number of rows. */ inline count numberOfRows() const { return nRows; } /** * @return Number of columns. */ inline count numberOfColumns() const { return nCols; } /** * @param i The row index. 
* @return Number of non-zeros in row @a i. */ count nnzInRow(const index i) const; /** * @return Number of non-zeros in this matrix. */ count nnz() const; /** * @return Value at matrix position (i,j). */ double operator()(const index i, const index j) const; /** * Sorts the column indices in each row for faster access. */ void sort(); /** * @return True if the matrix is sorted, otherwise false. */ bool sorted() const; /** * @return Row @a i of this matrix as vector. */ Vector row(const index i) const; /** * @return Column @a j of this matrix as vector. */ Vector column(const index j) const; /** * @return The main diagonal of this matrix. */ Vector diagonal() const; /** * Adds this matrix to @a other and returns the result. * @return The sum of this matrix and @a other. */ CSRMatrix operator+(const CSRMatrix &other) const; /** * Adds @a other to this matrix. * @return Reference to this matrix. */ CSRMatrix& operator+=(const CSRMatrix &other); /** * Subtracts @a other from this matrix and returns the result. * @return The difference of this matrix and @a other. * */ CSRMatrix operator-(const CSRMatrix &other) const; /** * Subtracts @a other from this matrix. * @return Reference to this matrix. */ CSRMatrix& operator-=(const CSRMatrix &other); /** * Multiplies this matrix with a scalar specified in @a scalar and returns the result. * @return The result of multiplying this matrix with @a scalar. */ CSRMatrix operator*(const double &scalar) const; /** * Multiplies this matrix with a scalar specified in @a scalar. * @return Reference to this matrix. */ CSRMatrix& operator*=(const double &scalar); /** * Multiplies this matrix with @a vector and returns the result. * @return The result of multiplying this matrix with @a vector. */ Vector operator*(const Vector &vector) const; /** * Multiplies this matrix with @a other and returns the result in a new matrix. * @return The result of multiplying this matrix with @a other. 
*/ CSRMatrix operator*(const CSRMatrix &other) const; /** * Divides this matrix by a divisor specified in @a divisor and returns the result in a new matrix. * @return The result of dividing this matrix by @a divisor. */ CSRMatrix operator/(const double &divisor) const; /** * Divides this matrix by a divisor specified in @a divisor. * @return Reference to this matrix. */ CSRMatrix& operator/=(const double &divisor); /** * Creates a submatrix of this matrix consisting of the rows specified in @a rows and columns specified in @a columns. * @param rows The row indices referencing the rows to include in the submatrix. * @param columns The column indices referencing the columns to include in the submatrix. * @return The submatrix of this matrix consisting of @a rows and @a columns. */ CSRMatrix subMatrix(const std::vector<index> &rows, const std::vector<index> &columns) const; /** * Computes @a A @a binaryOp @a B on the elements of matrix @a A and matrix @a B. * @param A Sorted CSRMatrix. * @param B Sorted CSRMatrix. * @param binaryOp Function handling (double, double) -> double * @return @a A @a binaryOp @a B. * @note @a A and @a B must have the same dimensions and must be sorted. */ template<typename L> static CSRMatrix binaryOperator(const CSRMatrix &A, const CSRMatrix &B, L binaryOp); /** * Computes @a A^T * @a B. * @param A * @param B * @return @a A^T * @a B. * @note The number of rows of @a A must be equal to the number of rows of @a B. */ static CSRMatrix mTmMultiply(const CSRMatrix &A, const CSRMatrix &B); /** * Computes @a A * @a B^T. * @param A * @param B * @return @a A * @a B^T. * @note The number of columns of @a A must be equal to the number of columns of @a B. */ static CSRMatrix mmTMultiply(const CSRMatrix &A, const CSRMatrix &B); /** * Computes @a matrix^T * @a vector. * @param matrix * @param vector * @return @a matrix^T * @a vector. * @note The number of rows of @a matrix must be equal to the dimension of @a vector. 
*/ static Vector mTvMultiply(const CSRMatrix &matrix, const Vector &vector); /** * Compute the (weighted) Laplacian of the (weighted) @a graph. * @param graph * @return The (weighted) Laplacian. */ static CSRMatrix graphLaplacian(const Graph &graph); /** * Compute the (weighted) adjacency matrix of the (weighted) @a graph. * @param graph * @return The (weighted) adjacency matrix. */ static CSRMatrix adjacencyMatrix(const Graph &graph); /** * Computes a graph having the given @a laplacian. * @param laplacian * @return The graph having a Laplacian equal to @a laplacian. */ static Graph laplacianToGraph(const CSRMatrix &laplacian); /** * Interprets the @a matrix as adjacency matrix of a graph. If @a matrix is non-symmetric, the graph will be directed. * @param matrix * @return The graph having an adjacency matrix equal to @a matrix. */ static Graph matrixToGraph(const CSRMatrix &matrix); /** * Checks if @a matrix is symmetric. * @param matrix * @return True if @a matrix is symmetric, otherwise false. */ static bool isSymmetric(const CSRMatrix &matrix); /** * Checks if @a matrix is symmetric diagonally dominant (SDD). * @param matrix * @return True if @a matrix is SDD, false otherwise. */ static bool isSDD(const CSRMatrix &matrix); /** * Checks if @a matrix is a Laplacian matrix. * @param matrix * @return True if @a matrix is a Laplacian matrix, false otherwise. */ static bool isLaplacian(const CSRMatrix &matrix); /** * Transposes this matrix and returns it. * @return The transposed matrix of this matrix. 
*/ CSRMatrix transpose() const; /** * Iterate over all non-zero elements of row @a row in the matrix and call handler(index column, double value) */ template<typename L> void forNonZeroElementsInRow(index i, L handle) const; /** * Iterate in parallel over all non-zero elements of row @a row in the matrix and call handler(index column, double value) */ template<typename L> void parallelForNonZeroElementsInRow(index i, L handle) const; /** * Iterate over all non-zero elements of the matrix in row order and call handler (lambda closure). */ template<typename L> void forNonZeroElementsInRowOrder(L handle) const; /** * Iterate in parallel over all rows and call handler (lambda closure) on non-zero elements of the matrix. */ template<typename L> void parallelForNonZeroElementsInRowOrder(L handle) const; /** * Iterate in parallel over all rows and call handler (lambda closure) on non-zero elements of the matrix. */ template<typename L> void parallelForNonZeroElementsInRowOrder(L handle); }; template<typename L> inline CSRMatrix NetworKit::CSRMatrix::binaryOperator(const CSRMatrix &A, const CSRMatrix &B, L binaryOp) { assert(A.nRows == B.nRows && A.nCols == B.nCols); if (!A.sorted() || !B.sorted()) throw std::runtime_error("The matrices must be sorted for this operation"); std::vector<index> rowIdx(A.nRows+1); std::vector<std::vector<index>> columns(A.nRows); rowIdx[0] = 0; #pragma omp parallel for for (index i = 0; i < A.nRows; ++i) { index k = A.rowIdx[i]; index l = B.rowIdx[i]; while (k < A.rowIdx[i+1] && l < B.rowIdx[i+1]) { if (A.columnIdx[k] < B.columnIdx[l]) { columns[i].push_back(A.columnIdx[k]); ++k; } else if (A.columnIdx[k] > B.columnIdx[l]) { columns[i].push_back(B.columnIdx[l]); ++l; } else { // A.columnIdx[k] == B.columnIdx[l] columns[i].push_back(A.columnIdx[k]); ++k; ++l; } ++rowIdx[i+1]; } while (k < A.rowIdx[i+1]) { columns[i].push_back(A.columnIdx[k]); ++k; ++rowIdx[i+1]; } while (l < B.rowIdx[i+1]) { columns[i].push_back(B.columnIdx[l]); ++l; 
++rowIdx[i+1]; } } for (index i = 0; i < A.nRows; ++i) { rowIdx[i+1] += rowIdx[i]; } count nnz = rowIdx[A.nRows]; std::vector<index> columnIdx(nnz); std::vector<double> nonZeros(nnz, 0.0); #pragma omp parallel for for (index i = 0; i < A.nRows; ++i) { for (index cIdx = rowIdx[i], j = 0; cIdx < rowIdx[i+1]; ++cIdx, ++j) { columnIdx[cIdx] = columns[i][j]; } columns[i].clear(); columns[i].resize(0); columns[i].shrink_to_fit(); } #pragma omp parallel for for (index i = 0; i < A.nRows; ++i) { index k = A.rowIdx[i]; index l = B.rowIdx[i]; for (index cIdx = rowIdx[i]; cIdx < rowIdx[i+1]; ++cIdx) { if (k < A.rowIdx[i+1] && columnIdx[cIdx] == A.columnIdx[k]) { nonZeros[cIdx] = A.nonZeros[k]; ++k; } if (l < B.rowIdx[i+1] && columnIdx[cIdx] == B.columnIdx[l]) { nonZeros[cIdx] = binaryOp(nonZeros[cIdx], B.nonZeros[l]); ++l; } } } return CSRMatrix(A.nRows, A.nCols, rowIdx, columnIdx, nonZeros, true); // std::vector<int64_t> columnPointer(A.nCols, -1); // std::vector<double> Arow(A.nCols, 0.0); // std::vector<double> Brow(A.nCols, 0.0); // std::vector<Triple> triples; // // for (index i = 0; i < A.nRows; ++i) { // index listHead = 0; // count nnz = 0; // // // search for nonZeros in our own matrix // for (index k = A.rowIdx[i]; k < A.rowIdx[i+1]; ++k) { // index j = A.columnIdx[k]; // Arow[j] = A.nonZeros[k]; // // columnPointer[j] = listHead; // listHead = j; // nnz++; // } // // // search for nonZeros in the other matrix // for (index k = B.rowIdx[i]; k < B.rowIdx[i+1]; ++k) { // index j = B.columnIdx[k]; // Brow[j] = B.nonZeros[k]; // // if (columnPointer[j] == -1) { // our own matrix does not have a nonZero entry in column j // columnPointer[j] = listHead; // listHead = j; // nnz++; // } // } // // // apply operator on the found nonZeros in A and B // for (count k = 0; k < nnz; ++k) { // double value = binaryOp(Arow[listHead], Brow[listHead]); // if (value != 0.0) { // triples.push_back({i, listHead, value}); // } // // index temp = listHead; // listHead = 
columnPointer[listHead]; // // // reset for next row // columnPointer[temp] = -1; // Arow[temp] = 0.0; // Brow[temp] = 0.0; // } // // nnz = 0; // } // // return CSRMatrix(A.nRows, A.nCols, triples); } } /* namespace NetworKit */ template<typename L> inline void NetworKit::CSRMatrix::forNonZeroElementsInRow(index i, L handle) const { for (index k = rowIdx[i]; k < rowIdx[i+1]; ++k) { handle(columnIdx[k], nonZeros[k]); } } template<typename L> inline void NetworKit::CSRMatrix::parallelForNonZeroElementsInRow(index i, L handle) const { #pragma omp parallel for for (index k = rowIdx[i]; k < rowIdx[i+1]; ++k) { handle(columnIdx[k], nonZeros[k]); } } template<typename L> inline void NetworKit::CSRMatrix::forNonZeroElementsInRowOrder(L handle) const { for (index i = 0; i < nRows; ++i) { for (index k = rowIdx[i]; k < rowIdx[i+1]; ++k) { handle(i, columnIdx[k], nonZeros[k]); } } } template<typename L> inline void NetworKit::CSRMatrix::parallelForNonZeroElementsInRowOrder(L handle) const { #pragma omp parallel for for (index i = 0; i < nRows; ++i) { for (index k = rowIdx[i]; k < rowIdx[i+1]; ++k) { handle(i, columnIdx[k], nonZeros[k]); } } } template<typename L> inline void NetworKit::CSRMatrix::parallelForNonZeroElementsInRowOrder(L handle) { #pragma omp parallel for for (index i = 0; i < nRows; ++i) { for (index k = rowIdx[i]; k < rowIdx[i+1]; ++k) { handle(i, columnIdx[k], nonZeros[k]); } } } #endif /* TESTMATRIX_H_ */
GB_unaryop__abs_bool_fp64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_bool_fp64 // op(A') function: GB_tran__abs_bool_fp64 // C type: bool // A type: double // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ double #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ bool z = (bool) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_BOOL || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_bool_fp64 ( bool *restrict Cx, const double *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_bool_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
diagsm_x_dia_n_row.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include <memory.h>
#include <stdlib.h>

/*
 * Diagonal solve for a DIA-format matrix where only the main diagonal is used:
 * y(r,c) = alpha * x(r,c) / diag(A)[r], for all rows r and the first `columns`
 * columns, with x/y stored with leading dimensions ldx/ldy (row-major index2).
 * Returns ALPHA_SPARSE_STATUS_SUCCESS, or ALPHA_SPARSE_STATUS_ALLOC_FAILED if
 * the scratch buffer cannot be allocated.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
    /* Fix: the original used a runtime-sized VLA `ALPHA_Number diag[A->rows]`,
     * which can overflow the stack for large matrices. calloc also provides
     * the all-zero-bytes initialization the original memset produced. */
    ALPHA_Number *diag = (ALPHA_Number *)calloc((size_t)A->rows, sizeof(ALPHA_Number));
    if (diag == NULL)
    {
        return ALPHA_SPARSE_STATUS_ALLOC_FAILED;
    }

    int num_thread = alpha_get_thread_num();

    /* Extract the main diagonal (distance == 0). At most one stored diagonal
     * can match, so concurrent iterations never write the same diag[] slots. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for (ALPHA_INT i = 0; i < A->ndiag; i++)
    {
        if (A->distance[i] == 0)
        {
            for (ALPHA_INT r = 0; r < A->rows; r++)
            {
                diag[r] = A->values[i * A->lval + r];
            }
        }
    }

    /* y(r,c) = alpha * x(r,c) / diag[r]. If the main diagonal is absent,
     * diag[r] stays zero and alpha_div's semantics decide the result, exactly
     * as in the original code. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for (ALPHA_INT r = 0; r < A->rows; ++r)
    {
        for (ALPHA_INT c = 0; c < columns; ++c)
        {
            ALPHA_Number t;
            alpha_setzero(t);
            alpha_mul(t, alpha, x[index2(r, c, ldx)]);
            alpha_div(y[index2(r, c, ldy)], t, diag[r]);
        }
    }

    free(diag);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
target-3c.c
// ---------------------------------------------------------------------------------------- // Implementation of Example target.3c (Section 52.3, page 196) from Openmp // 4.0.2 Examples // on the document http://openmp.org/mp-documents/openmp-examples-4.0.2.pdf // // // // // ---------------------------------------------------------------------------------------- #include <stdarg.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <time.h> #include <unistd.h> #ifdef _OPENMP #include <omp.h> #endif #include "BenchmarksUtil.h" // define the error threshold for the results "not matching" #define ERROR_THRESHOLD 0.05 /* Problem size */ #define N 8192 /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void init(DATA_TYPE *A, DATA_TYPE *B) { int i; for (i = 0; i < N; i++) { A[i] = i / 2.0; B[i] = ((N - 1) - i) / 3.0; } return; } void vec_mult(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C) { int i; for (i = 0; i < N; i++) C[i] = A[i] * B[i]; } void vec_mult_OMP(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C) { int i; #pragma omp target map(to : A[ : N], \ B[ : N]) map(from : C[ : N]) device(DEVICE_ID) #pragma omp parallel for for (i = 0; i < N; i++) C[i] = A[i] * B[i]; } int compareResults(DATA_TYPE *B, DATA_TYPE *B_GPU) { int i, fail; fail = 0; // Compare B and B_GPU for (i = 0; i < N; i++) { if (percentDiff(B[i], B_GPU[i]) > ERROR_THRESHOLD) { fail++; } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f " "Percent: %d\n", ERROR_THRESHOLD, fail); return fail; } int main(int argc, char *argv[]) { double t_start, t_end, t_start_OMP, t_end_OMP; int fail = 0; DATA_TYPE *A; DATA_TYPE *B; DATA_TYPE *C; DATA_TYPE *C_OMP; A = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE)); B = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE)); C = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE)); C_OMP = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE)); fprintf(stdout, ">> Two vector multiplication <<\n"); // initialize the 
arrays init(A, B); t_start_OMP = rtclock(); vec_mult_OMP(A, B, C_OMP); t_end_OMP = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end_OMP - t_start_OMP); //); #ifdef RUN_TEST t_start = rtclock(); vec_mult(A, B, C); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); //); fail = compareResults(C, C_OMP); #endif free(A); free(B); free(C); free(C_OMP); return fail; }
algo.c
/* * algo.c * * Created on: 2011-09-25 * Author: francis */ #define _GNU_SOURCE #include <stdlib.h> #include <stdio.h> #include <inttypes.h> #include <string.h> #include "algo.h" #include "chunk.h" #include "omp.h" int sigma(int n) { return (n + 1) * n; } struct cs { uint64_t checksum; } __attribute__((aligned(64))); int encode_fast(struct chunk *chunk) { // TODO int i; int area = chunk->area; int key = chunk->key; char *data = chunk->data; uint64_t checksum = 0; #pragma omp parallel for private(i) reduction(+:checksum) for (i = 0; i < area; i++) { data[i] = data[i] + key; checksum += data[i]; } chunk->checksum = checksum; return 0; } int encode_slow_a(struct chunk *chunk) { int i, j; uint64_t checksum = 0; #pragma omp parallel for private(i,j) reduction(+:checksum) for (i = 0; i < chunk->height; i++) { for (j = 0; j < chunk->width; j++) { int index = i * chunk->width + j; chunk->data[index] = chunk->data[index] + chunk->key; checksum += chunk->data[index]; } } chunk->checksum = checksum; return 0; } int encode_slow_b(struct chunk *chunk) { int i; int area = chunk->area; int key = chunk->key; char *data = chunk->data; uint64_t* checksums; int n; #pragma omp parallel private(i) { #pragma omp single { n = omp_get_num_threads(); checksums = calloc(n, sizeof(uint64_t)); } #pragma omp barrier int id = omp_get_thread_num(); for (i = id; i < area; i += n) { data[i] = data[i] + key; checksums[id] += data[i]; } #pragma omp barrier } chunk->checksum = 0; for (i = 0; i < n; i++) chunk->checksum += checksums[i]; return 0; } int encode_slow_c(struct chunk *chunk) { int i; int checksum = 0; char *data = chunk->data; int area = chunk->area; int key = chunk->key; #pragma omp parallel for for (i = 0; i < area; i++) { data[i] = data[i] + key; #pragma omp atomic checksum += data[i]; } chunk->checksum = checksum; return 0; } int encode_slow_d(struct chunk *chunk) { int i; int checksum = 0; char *data = chunk->data; int area = chunk->area; int key = chunk->key; #pragma omp parallel for 
for (i = 0; i < area; i++) { data[i] = data[i] + key; #pragma omp critical { checksum += data[i]; } } chunk->checksum = checksum; return 0; } int encode_slow_e(struct chunk *chunk) { int i, j; int checksum = 0; int width = chunk->width; int height = chunk->height; int key = chunk->key; char *data = chunk->data; #pragma omp parallel for private(i,j) reduction(+:checksum) for (i = 0; i < width; i++) { for (j = 0; j < height; j++) { int index = i + j * width; data[index] = data[index] + key; checksum += data[index]; } } chunk->checksum = checksum; return 0; } int encode_slow_f(struct chunk *chunk) { int i; int area = chunk->area; int key = chunk->key; char *data = chunk->data; struct cs* cs; int n; int sig; uint64_t checksum; #pragma omp parallel private(i, checksum) { #pragma omp single { n = omp_get_num_threads(); cs = calloc(n, sizeof(struct cs)); sig = sigma(n); } #pragma omp barrier checksum = 0; int id = omp_get_thread_num(); int start = (int) (((uint64_t)sigma(id)) * area / sig); int end = (int) (((uint64_t)sigma(id + 1)) * area / sig); for (i = start; i < end; i++) { data[i] = data[i] + key; checksum += data[i]; } #pragma omp barrier cs[id].checksum = checksum; } chunk->checksum = 0; for (i = 0; i < n; i++) chunk->checksum += cs[i].checksum; return 0; }
bench.c
#include "omp.h" #include "pmsis.h" #define LOOP_ITER (2048) #define NB_ITER (256) #define NB_BARRIER_ITER (256) #define NB_ITER_SINGLE (128) #define CORE_ID pi_core_id() #define PRINTF(...) //#define PRINTF(...) printf(__VA_ARGS__) static void start_timer() { pi_perf_cl_reset(); pi_perf_conf(1<<PI_PERF_CYCLES); pi_perf_cl_start(); } static void reset_timer() { pi_perf_cl_reset(); } static unsigned int get_time() { return pi_perf_cl_read(PI_PERF_CYCLES); } static inline unsigned int startTimer() { PRINTF("Starting timer\n"); reset_timer(); start_timer(); return 0; } static inline unsigned int getTimer(unsigned int start) { PRINTF("Ending timer\n"); return get_time(); } void test_barrier(unsigned int nthreads) { #pragma omp parallel num_threads(nthreads) shared(nthreads) { unsigned int start; int i; float operation_cost = 0; if (omp_get_thread_num() == 0) { start = startTimer(); } for (i = 0; i < NB_BARRIER_ITER; i++) { #pragma omp barrier } if (omp_get_thread_num() == 0) { unsigned int end = getTimer(start); operation_cost = (float) end / NB_BARRIER_ITER; printf("BARRIER %d threads: %f cycles\n", nthreads, operation_cost); } } } void test_critical(unsigned int nthreads) { #pragma omp parallel num_threads(nthreads) { int i; unsigned int start = startTimer(); float operation_cost = 0; for (i = 0; i < NB_ITER; i++) { #pragma omp critical { volatile int a = 0; } } #pragma omp barrier operation_cost = (float) getTimer(start) / NB_ITER; if (CORE_ID == 0) { printf("CRITICAL %d threads: %.3f cycles\n", nthreads, operation_cost); } } } void test_parallel_loop_static(unsigned int nthreads) { int i; int j; unsigned int start = startTimer(); float iteration_cost = 0; for (i = 0; i < NB_ITER; i++) { #pragma omp parallel for num_threads(nthreads) for (j = 0; j < LOOP_ITER; j++) { volatile int a = j; } } iteration_cost = ((float) getTimer(start)/(NB_ITER * LOOP_ITER)); printf("PARALLEL FOR %d threads STATIC %d iter: %.3f cycle(s) per iteration\n", nthreads, LOOP_ITER, 
iteration_cost); } void test_parallel_single(unsigned int nthreads) { #pragma omp parallel num_threads(nthreads) { int i; int j; unsigned int start = startTimer(); float iteration_cost = 0; for (i = 0; i < NB_ITER; i++) { #pragma omp single { volatile int a = 0; } } if (omp_get_thread_num() == 0) { iteration_cost = ((float) getTimer(start)/(NB_ITER * LOOP_ITER)); printf("PARALLEL SINGLE %d threads STATIC %d iter: %.3f cycle(s) per iteration\n", nthreads, LOOP_ITER, iteration_cost); } } } void test_entry() { for (int i = 1; i <= pi_cl_cluster_nb_cores(); i++) { test_barrier(i); } printf("\n"); for (int i = 1; i <= pi_cl_cluster_nb_cores(); i++) { test_critical(i); } printf("\n"); for (int i = 1; i <= pi_cl_cluster_nb_cores(); i++) { test_parallel_loop_static (i); } printf("\n"); for (int i = 1; i <= pi_cl_cluster_nb_cores(); i++) { test_parallel_single(i); } } void launch_test(void) { printf("Entering main controller\n"); uint32_t errors = 0; uint32_t core_id = pi_core_id(), cluster_id = pi_cluster_id(); struct pi_device cluster_dev = {0}; struct pi_cluster_conf cl_conf = {0}; /* Init cluster configuration structure. */ pi_cluster_conf_init(&cl_conf); cl_conf.id = 0; /* Set cluster ID. */ /* Configure & open cluster. */ pi_open_from_conf(&cluster_dev, &cl_conf); if (pi_cluster_open(&cluster_dev)) { printf("Cluster open failed !\n"); pmsis_exit(-1); } /* Prepare cluster task and send it to cluster. */ struct pi_cluster_task cl_task = {0}; cl_task.entry = test_entry; cl_task.arg = NULL; pi_cluster_send_task_to_cl(&cluster_dev, &cl_task); pi_cluster_close(&cluster_dev); printf("Test success !\n"); pmsis_exit(errors); } /* Program Entry. */ int main(void) { printf("\n\n\t *** OpenMP Benchmark ***\n\n"); return pmsis_kickoff((void *) launch_test); }
TM_to_SC.c
//START_INCLUDES #include "q_incs.h" //STOP_INCLUDES #include "TM_to_SC.h" //START_FUNC_DECL int TM_to_SC( struct tm *inv, uint64_t n_in, const char *format, char * outv, uint32_t width // remember that this DOES include nullc ) //STOP_FUNC_DECL { int status = 0; if ( inv == NULL ) { go_BYE(-1); } if ( outv == NULL ) { go_BYE(-1); } if ( n_in == 0 ) { go_BYE(-1); } if ( width == 0 ) { go_BYE(-1); } #pragma omp parallel for schedule(static, 1024) for ( uint64_t i = 0; i < n_in; i++ ) { memset(outv+(i*width), '\0', width); size_t rslt = strftime(outv+(i*width), width-1, format, inv + i); if ( rslt == 0 ) { status = -1; continue; } } cBYE(status); BYE: return status; }
lowly_pol.c
#ifdef __PLANCK__ #include "HL2_likely/target/lowly_pol.h" #else #include "lowly_pol.h" #endif /******************************************************************************* This part assumes that T and Pol mask are identical ******************************************************************************/ #undef __FUNC__ #define __FUNC__ "scalCovPol" /* Polarized pixel covariance for a given rotation */ /* This is a 3x3 matrix, expressed as a linear array */ void scalCovPol (double *covmat, const double cb, const double c2a, const double s2a, const double c2g, const double s2g, const long lmax, const double *qtt, const double *qee, const double *qbb, const double *qte, const double *qtb, const double *qeb) { double c2apg, s2apg, c2amg, s2amg; double xitt, xip, rxim, ixim, rxic, ixic; double rxic2,ixic2,rxim2,ixim2,rxip2,ixip2; double dl00, dl22, dl2m2, dl20; double plm2, plm1; long l; double dl; /* Construct cos2(alpha \pm gamma), sin2(alpha \pm gamma) */ c2apg = c2a*c2g-s2a*s2g; s2apg = s2a*c2g+c2a*s2g; c2amg = c2a*c2g+s2a*s2g; s2amg = s2a*c2g-c2a*s2g; /* Recurrence loop to construct rotation invariant correlations */ /* Temperature auto-correlation */ plm2=1.0; plm1=cb; xitt = 1.0*plm2*qtt[0] + 3.0*plm1*qtt[1]; for (l=2;l<=lmax;l++) { dl = (double) l; dl00 = 2.0*cb*plm1 - plm2 - (cb*plm1-plm2)/dl; xitt += (2.0*dl+1)*dl00*qtt[l]; plm2 = plm1; plm1 = dl00; } xitt /= 4.0*M_PI; /* Temperature-polarization cross-correlation */ plm2 = sqrt(6.0)*(1.0+cb)*(1.0-cb)/4.0; /* d^2_{20} */ plm1 = sqrt(5.0)*cb*plm2; /* d^3_{20} */ rxic = 5.0*plm2*qte[2] + 7.0*plm1*qte[3]; ixic = -(5.0*plm2*qtb[2] + 7.0*plm1*qtb[3]); for (l=4;l<=lmax;l++) { dl = (double)l; dl20 = -(2.0*dl-1)/sqrt((dl-2.0)*(dl+2.0))* ( -cb*plm1 + sqrt((dl-3.0)*(dl+1.0))/(2.0*dl-1.0)*plm2 ); rxic += (2.0*dl+1.0)*dl20*qte[l]; ixic -= (2.0*dl+1.0)*dl20*qtb[l]; plm2 = plm1; plm1 = dl20; } rxic /= 4.0*M_PI; ixic /= 4.0*M_PI; /* Polarization '+' correlation, based on d^l_{22} */ plm2 = (1.0+cb)*(1.0+cb)/4.; /* 
d^2_{22} */ plm1 = (3.0*cb-2.0)*plm2; //!!! xip = 5.0*plm2*(qee[2]+qbb[2]) + 7.0*plm1*(qee[3]+qbb[3]); for (l=4;l<=lmax;l++) { dl = (double)l; dl22 = -dl*(2.0*dl-1.0)/((dl-2.0)*(dl+2.0))* ( (4.0/dl/(dl-1.0)-cb)*plm1 +(dl-3.0)*(dl+1.0)/((dl-1.0)*(2.0*dl-1.0))*plm2 ); xip += (2.0*dl+1.0)*dl22*(qee[l]+qbb[l]); plm2 = plm1; plm1 = dl22; } xip /= 4.0*M_PI; /* Polarization '-' correlation, based on d^l_{2-2} */ plm2 = (1.0-cb)*(1.0-cb)/4.0; /* d^2_{2-2} */ plm1 = (3.0*cb+2.0)*plm2; ///!!! rxim = 5.0*plm2*(qee[2]-qbb[2]) + 7.0*plm1*(qee[3]-qbb[3]); ixim = -2.0*(5.0*plm2*qeb[2] + 7.0*plm1*qeb[3]); for (l=4;l<=lmax;l++) { dl = (double)l; dl2m2 = -dl*(2.0*dl-1.0)/((dl-2.0)*(dl+2.0))* ( (-4.0/dl/(dl-1.0)-cb)*plm1 +(dl-3.0)*(dl+1.0)/((dl-1.0)*(2.0*dl-1.0))*plm2 ); rxim += (2.0*dl+1)*dl2m2*(qee[l]-qbb[l]); ixim -= 2.0*(2.0*dl+1.0)*dl2m2*qeb[l]; plm2 = plm1; plm1 = dl2m2; } rxim /= 4.0*M_PI; ixim /= 4.0*M_PI; /* Now put frame dependence (alpha, gamma), suffix '2'*/ rxic2 = rxic*c2g - ixic*s2g; ixic2 = rxic*s2g + ixic*c2g; rxip2 = xip*c2amg; ixip2 = xip*s2amg; rxim2 = rxim*c2apg - ixim*s2apg; ixim2 = rxim*s2apg + ixim*c2apg; /* Now fill covariance matrix */ //covmat = (double*) malloc_err(9*sizeof(double),err); covmat[0] = xitt; covmat[1] = rxic2; /* TQ */ covmat[2] = ixic2; /* TU */ covmat[3] = covmat[1]; covmat[4] = (rxip2+rxim2)/2.0; /* QQ */ covmat[5] = (ixip2+ixim2)/2.0; /* QU */ covmat[6] = covmat[2]; covmat[7] = covmat[5]; covmat[8] = (rxip2-rxim2)/2.0; /* UU */ return; } #undef __FUNC__ #define __FUNC__ "build_trigo_matrices" double* build_trigo_matrices(const long npix_seen, const double * xyz, error **err) { long i,j,d; double e_r_i[3], e_r_j[3], e_theta_i[3], e_phi_i[3]; double cb, norm, den, x1, x2; double *res,*cb_ij, *c2psi_ij, *s2psi_ij; res = (double*) malloc_err(3*_SZT_(npix_seen)*_SZT_(npix_seen)*sizeof(double),err); forwardError(*err,__LINE__,NULL); cb_ij = res; c2psi_ij = res + npix_seen*npix_seen; s2psi_ij = res + 2*npix_seen*npix_seen; for 
(i=0;i<npix_seen;i++) { for (j=0;j<npix_seen;j++) { if (i==j) { // diagonal case: same pixel 'pair' cb_ij[i*npix_seen+i]=1.0; c2psi_ij[i*npix_seen+i]=1.0; s2psi_ij[i*npix_seen+i]=0.0; } else { // Get local values of e_r for this pixel pair for (d=0;d<3;d++) { e_r_i[d] = xyz[3*i+d]; e_r_j[d] = xyz[3*j+d]; } // e_phi = (-y,x,0)/sqrt(x^2+y^2) e_phi_i[0] = -e_r_i[1]; e_phi_i[1] = e_r_i[0]; e_phi_i[2] = 0.0; norm = sqrt(e_phi_i[0]*e_phi_i[0]+e_phi_i[1]*e_phi_i[1]); e_phi_i[0] /= norm; e_phi_i[1] /= norm; // e_theta = e_phi x e_r e_theta_i[0] = e_phi_i[1]*e_r_i[2] - e_phi_i[2]*e_r_i[1]; e_theta_i[1] = -e_phi_i[0]*e_r_i[2] + e_phi_i[2]*e_r_i[0]; e_theta_i[2] = e_phi_i[0]*e_r_i[1] - e_phi_i[1]*e_r_i[0]; x1=0.0; x2=0.0; cb=0.0; // Compute dot products for (d=0;d<3;d++) { x1 += e_theta_i[d]* e_r_j[d]; x2 += e_phi_i[d] * e_r_j[d]; cb += e_r_i[d] * e_r_j[d]; } //if (cb > 1.0) cb=1.0; //if (cb < -1.0) cb=-1.0; // Store cb cb_ij[i*npix_seen+j] = cb; // Compute c2psi, s2psi den = x1*x1+x2*x2; if (fabs(den) < 1e-10) {// Paire antipodale c2psi_ij[i*npix_seen+j]=1.0; s2psi_ij[i*npix_seen+j]=0.0; } else { c2psi_ij[i*npix_seen+j] = (x1*x1-x2*x2)/den; s2psi_ij[i*npix_seen+j] = 2.0*x1*x2/den; } } } } return res; } #undef __FUNC__ #define __FUNC__ "build_trig_mat_plist" double* build_trig_mat_plist(const long nside, const long * pixel_indices, const long npix_seen, const int ordering, error **err) { double *posvec; double *res; posvec = lowly_get_posvec(nside,pixel_indices,npix_seen,ordering,err); forwardError(*err,__LINE__,NULL); res = build_trigo_matrices(npix_seen,posvec,err); forwardError(*err,__LINE__,NULL); free(posvec); return res; } #undef __FUNC__ #define __FUNC__ "build_cov_matrix_pol" double* build_cov_matrix_pol (double *orig, const double *trig_mat, const double *noisevar, const long npix_seen, const long lmax, const double * qtt, const double * qee, const double * qbb, const double * qte, const double * qtb, const double * qeb, error **err) { long i,j; double cb, c2a, c2g, 
s2a, s2g; double cov[9]; double *covmat; double *cb_ij,*c2psi_ij,*s2psi_ij; MALLOC_IF_NEEDED(covmat,orig,9*_SZT_(npix_seen)*_SZT_(npix_seen)*sizeof(double),err); forwardError(*err,__LINE__,NULL); memset((void*)covmat,0,9*npix_seen*npix_seen*sizeof(double)); cb_ij = trig_mat; c2psi_ij = trig_mat + npix_seen*npix_seen; s2psi_ij = trig_mat + 2*npix_seen*npix_seen; for (i=0;i<npix_seen;i++) { //for (j=0;j<=i;j++) { for (j=0;j<npix_seen;j++) { cb = cb_ij[i*npix_seen+j]; c2a = c2psi_ij[i*npix_seen+j]; c2g = c2psi_ij[j*npix_seen+i]; s2a = s2psi_ij[i*npix_seen+j]; s2g = s2psi_ij[j*npix_seen+i]; scalCovPol(cov,cb,c2a,s2a,c2g,s2g,lmax,qtt,qee,qbb,qte,qtb,qeb); // Add noise if (i==j) { cov[0] += noisevar[i]; cov[4] += noisevar[npix_seen+i]; cov[8] += noisevar[2*npix_seen+i]; } // Now store in big monster matrix covmat[3*i*npix_seen+j]=cov[0]; //TT lower //covmat[3*j*npix_seen+i]=cov[0]; //TT upper // PATCH: TAKE -transpose(TQ,TU): needs to find why !!! // covmat[(j+npix_seen)*3*npix_seen+i]=-cov[1]; //TQ lower covmat[i*3*npix_seen+npix_seen+j]=-cov[1]; //TQ upper covmat[(j+2*npix_seen)*3*npix_seen+i]=-cov[2]; //TU lower covmat[i*3*npix_seen+2*npix_seen+j]=-cov[2]; //TU upper // covmat[(i+npix_seen)*3*npix_seen+npix_seen+j]=cov[4]; //QQ lower //covmat[(j+npix_seen)*3*npix_seen+npix_seen+i]=cov[4]; //QQ upper covmat[(i+2*npix_seen)*3*npix_seen+npix_seen+j]=cov[5]; //QU lower covmat[(j+npix_seen)*3*npix_seen+2*npix_seen+i]=cov[5]; //QU upper covmat[(i+2*npix_seen)*3*npix_seen+2*npix_seen+j]=cov[8]; //UU lower //covmat[(j+2*npix_seen)*3*npix_seen+2*npix_seen+i]=cov[8]; //UU upper } } return covmat; } /******************************************************************************* This part allows T and Pol mask to be different ******************************************************************************/ #undef __FUNC__ #define __FUNC__ "scalCovCross" /* Pixel TQ, TU covariance for a given separation, and a given third euler angle */ void scalCovCross(double *covcross, const 
double cb, const double c2g, const double s2g, const long lmax, const double *qte, const double *qtb) {
  double rxic, ixic;    /* real/imag parts of the T-P correlation */
  double rxic2,ixic2;   /* same, rotated into the local (theta,phi) frame */
  double dl20;          /* Wigner d^l_{20}(beta) */
  double plm2, plm1;    /* d^{l-2}_{20}, d^{l-1}_{20} carried by the recursion */
  long l;
  double dl;
  /* Temperature-polarization cross-correlation: sum over l of
     (2l+1) d^l_{20}(beta) C_l, seeded at l=2,3 then recursed upward. */
  plm2 = sqrt(6.0)*(1.0+cb)*(1.0-cb)/4.0; /* d^2_{20} */
  plm1 = sqrt(5.0)*cb*plm2; /* d^3_{20} */
  rxic = 5.0*plm2*qte[2] + 7.0*plm1*qte[3];
  ixic = -(5.0*plm2*qtb[2] + 7.0*plm1*qtb[3]);
  //_DEBUGHERE_("%g %g %g %g",rxic,ixic,qte[2],qtb[2]);
  for (l=4;l<=lmax;l++) {
    dl = (double)l;
    /* three-term upward recursion for d^l_{20}(beta) */
    dl20 = -(2.0*dl-1)/sqrt((dl-2.0)*(dl+2.0))* ( -cb*plm1 + sqrt((dl-3.0)*(dl+1.0))/(2.0*dl-1.0)*plm2 );
    rxic += (2.0*dl+1.0)*dl20*qte[l];
    ixic -= (2.0*dl+1.0)*dl20*qtb[l];
    plm2 = plm1;
    plm1 = dl20;
  }
  rxic /= 4.0*M_PI;
  ixic /= 4.0*M_PI;
  /* Now rotate into e_theta, e_phi frame */
  rxic2 = rxic*c2g - ixic*s2g;
  ixic2 = rxic*s2g + ixic*c2g;
  // Store result: covcross[0] = TQ, covcross[1] = TU
  covcross[0]=rxic2;
  covcross[1]=ixic2;
}

#undef __FUNC__
#define __FUNC__ "scalCovQU"
/* Pixel QQ,UU,QU covariance for a given separation and euler angles */
// Result stored as [QQ,QU,UU].
// NOTE(review): the original comment claimed [QQ,UU,QU], but the assignments
// below and the caller (build_cov_matrix_pol_general) both use
// covQU[1] = QU and covQU[2] = UU.
void scalCovQU(double *covQU, const double cb, const double c2a, const double s2a, const double c2g, const double s2g, const long lmax, const double *qee, const double *qbb, const double *qeb) {
  double c2apg, s2apg, c2amg, s2amg;
  double xip, rxim, ixim;       /* '+' and '-' polarization correlations */
  double rxim2,ixim2,rxip2,ixip2;
  double dl22, dl2m2;           /* Wigner d^l_{22}, d^l_{2-2} */
  double plm2, plm1;
  long l;
  double dl;
  /* Construct cos2(alpha \pm gamma), sin2(alpha \pm gamma) */
  c2apg = c2a*c2g-s2a*s2g;
  s2apg = s2a*c2g+c2a*s2g;
  c2amg = c2a*c2g+s2a*s2g;
  s2amg = s2a*c2g-c2a*s2g;
  /* Polarization '+' correlation, based on d^l_{22} */
  plm2 = (1.0+cb)*(1.0+cb)/4.; /* d^2_{22} */
  plm1 = (3.0*cb-2.0)*plm2; //!!! d^3_{22}
  xip = 5.0*plm2*(qee[2]+qbb[2]) + 7.0*plm1*(qee[3]+qbb[3]);
  for (l=4;l<=lmax;l++) {
    dl = (double)l;
    /* three-term upward recursion for d^l_{22}(beta) */
    dl22 = -dl*(2.0*dl-1.0)/((dl-2.0)*(dl+2.0))* ( (4.0/dl/(dl-1.0)-cb)*plm1 +(dl-3.0)*(dl+1.0)/((dl-1.0)*(2.0*dl-1.0))*plm2 );
    xip += (2.0*dl+1.0)*dl22*(qee[l]+qbb[l]);
    plm2 = plm1;
    plm1 = dl22;
  }
  xip /= 4.0*M_PI;
  /* Polarization '-' correlation, based on d^l_{2-2} */
  plm2 = (1.0-cb)*(1.0-cb)/4.0; /* d^2_{2-2} */
  plm1 = (3.0*cb+2.0)*plm2; ///!!! d^3_{2-2}
  rxim = 5.0*plm2*(qee[2]-qbb[2]) + 7.0*plm1*(qee[3]-qbb[3]);
  ixim = -2.0*(5.0*plm2*qeb[2] + 7.0*plm1*qeb[3]);
  for (l=4;l<=lmax;l++) {
    dl = (double)l;
    /* three-term upward recursion for d^l_{2-2}(beta) */
    dl2m2 = -dl*(2.0*dl-1.0)/((dl-2.0)*(dl+2.0))* ( (-4.0/dl/(dl-1.0)-cb)*plm1 +(dl-3.0)*(dl+1.0)/((dl-1.0)*(2.0*dl-1.0))*plm2 );
    rxim += (2.0*dl+1)*dl2m2*(qee[l]-qbb[l]);
    ixim -= 2.0*(2.0*dl+1.0)*dl2m2*qeb[l];
    plm2 = plm1;
    plm1 = dl2m2;
  }
  rxim /= 4.0*M_PI;
  ixim /= 4.0*M_PI;
  /* Now put frame dependence (alpha, gamma), suffix '2'*/
  rxip2 = xip*c2amg;
  ixip2 = xip*s2amg;
  rxim2 = rxim*c2apg - ixim*s2apg;
  ixim2 = rxim*s2apg + ixim*c2apg;
  covQU[0] = (rxip2+rxim2)/2.0; /* QQ */
  covQU[1] = (ixip2+ixim2)/2.0; /* QU */
  covQU[2] = (rxip2-rxim2)/2.0; /* UU */
}

#undef __FUNC__
#define __FUNC__ "build_trigo_matrices_general"
/* Build the pair-angle matrices for the general case where temperature and
   polarization use different pixel lists. The returned buffer packs the
   cos(separation) matrix for all (npix_temp+npix_pol)^2 pairs, followed
   (only when npix_pol > 0) by the cos(2psi) and sin(2psi) matrices.
   Caller frees; NULL on error via *err. */
double* build_trigo_matrices_general (const long npix_temp, const long npix_pol, const double * xyz_temp, const double * xyz_pol, error **err) {
  long i,j,d,TROIS;
  double *e_r_i, *e_r_j, e_theta_i[3], e_phi_i[3];
  double cb, norm, den, x1, x2;
  double *res,*cb_ij, *c2psi_ij, *s2psi_ij;
  long npix_sum;
  double * xyz;
  npix_sum = npix_temp + npix_pol;
  /* concatenate temperature and polarization position vectors */
  xyz = (double*) malloc_err(3*npix_sum*sizeof(double),err);
  forwardError(*err,__LINE__,NULL);
  memcpy(xyz,xyz_temp,3*npix_temp*sizeof(double));
  memcpy(xyz+3*npix_temp,xyz_pol,3*npix_pol*sizeof(double));
  /* temperature-only case: only the cb matrix is needed */
  TROIS = 3;
  if (npix_pol==0) {
    TROIS = 1;
  }
  res = (double*) malloc_err(TROIS*_SZT_(npix_sum)*_SZT_(npix_sum)*sizeof(double),err);
  forwardError(*err,__LINE__,NULL);
  cb_ij = res;
  c2psi_ij = res + npix_sum*npix_sum;
  s2psi_ij = res +
2*npix_sum*npix_sum;
//Everybody: fill the cos(separation) matrix for every pixel pair
#pragma omp parallel for default (shared) private (i,j,d,e_r_i,e_r_j,e_phi_i,norm,e_theta_i,x1,x2,cb,den)
for (i=0;i<npix_sum;i++) {
  for (j=0;j<npix_sum;j++) {
    if (i==j) {
      // diagonal case: same pixel 'pair'
      cb_ij[i*npix_sum+i]=1.0;
    } else {
      // Get local values of e_r for this pixel pair
      e_r_i = &xyz[3*i];
      e_r_j = &xyz[3*j];
      cb=0.0;
      // Compute dot products
      for (d=0;d<3;d++) {
        cb += e_r_i[d] * e_r_j[d];
      }
      cb_ij[i*npix_sum+j] = cb;
    }
  }
}
if (npix_pol==0) {
  /* temperature only: no frame-rotation angles required */
  free(xyz);
  return res;
}
//Polar only: fill the frame-rotation angle matrices
#pragma omp parallel for default (shared) private (i,j,d,e_r_i,e_r_j,e_phi_i,norm,e_theta_i,x1,x2,cb,den)
for (i=0;i<npix_sum;i++) {
  for (j=0;j<npix_sum;j++) {
    if (i==j) {
      // diagonal case: same pixel 'pair'
      c2psi_ij[i*npix_sum+i]=1.0;
      s2psi_ij[i*npix_sum+i]=0.0;
    } else {
      // Get local values of e_r for this pixel pair
      e_r_i = &xyz[3*i];
      e_r_j = &xyz[3*j];
      // e_phi = (-y,x,0)/sqrt(x^2+y^2)
      e_phi_i[0] = -e_r_i[1];
      e_phi_i[1] = e_r_i[0];
      e_phi_i[2] = 0.0;
      norm = sqrt(e_phi_i[0]*e_phi_i[0]+e_phi_i[1]*e_phi_i[1]);
      e_phi_i[0] /= norm;
      e_phi_i[1] /= norm;
      // e_theta = e_phi x e_r
      e_theta_i[0] = e_phi_i[1]*e_r_i[2] - e_phi_i[2]*e_r_i[1];
      e_theta_i[1] = -e_phi_i[0]*e_r_i[2] + e_phi_i[2]*e_r_i[0];
      e_theta_i[2] = e_phi_i[0]*e_r_i[1] - e_phi_i[1]*e_r_i[0];
      x1=0.0;
      x2=0.0;
      cb=0.0;
      // Compute dot products (project e_r_j onto the local basis at pixel i)
      for (d=0;d<3;d++) {
        x1 += e_theta_i[d]* e_r_j[d];
        x2 += e_phi_i[d] * e_r_j[d];
      }
      // Compute c2psi, s2psi
      den = x1*x1+x2*x2;
      if (fabs(den) < 1e-10) {// Antipodal pair, or duplicated pixel
        c2psi_ij[i*npix_sum+j]=1.0;
        s2psi_ij[i*npix_sum+j]=0.0;
      } else {
        c2psi_ij[i*npix_sum+j] = (x1*x1-x2*x2)/den;
        s2psi_ij[i*npix_sum+j] = 2.0*x1*x2/den;
      }
    }
  }
}
free(xyz);
return res;
}

#undef __FUNC__
#define __FUNC__ "build_trig_mat_general_plist"
/* Wrapper: compute position vectors for the (possibly different) temperature
   and polarization pixel lists, then build the general pair-angle matrices.
   Caller frees the result. */
double* build_trig_mat_general_plist(const long nside, const long * pixel_temp, const long npix_temp, const long * pixel_pol, const long npix_pol, const int ordering, error **err) {
  double *pos_temp,*pos_pol;
  double *res;
  pos_temp = lowly_get_posvec(nside,pixel_temp,npix_temp,ordering,err);
  forwardError(*err,__LINE__,NULL);
  pos_pol = lowly_get_posvec(nside,pixel_pol,npix_pol,ordering,err);
  forwardError(*err,__LINE__,NULL);
  res = build_trigo_matrices_general(npix_temp,npix_pol,pos_temp,pos_pol,err);
  forwardError(*err,__LINE__,NULL);
  free(pos_temp);
  if (pos_pol != NULL) free(pos_pol);
  return res;
}

#undef __FUNC__
#define __FUNC__ "build_cov_matrix_pol_general"
// Beware: needs to be used in conjonction with build_trigo_matrices_general()
/* Build the (npix_temp+2*npix_pol)^2 pixel-space covariance matrix of a
   (T,Q,U) data vector when temperature and polarization masks differ.
   noisevar, if non-NULL, holds one diagonal variance per kept pixel
   (T block, then Q, then U). orig, if non-NULL, is reused as output. */
double* build_cov_matrix_pol_general (double *orig, const double *trig_mat, const double *noisevar, const long npix_temp, const long npix_pol, const long lmax, const double * qtt, const double * qee, const double * qbb, const double * qte, const double * qtb, const double * qeb, error **err) {
  long i,j;
  double cb, c2a, c2g, s2a, s2g;
  double covTT;
  double covcross[2];
  double covQU[3];
  double *covmat;
  long npix_tot, npix_sum;
  /* NOTE(review): const of trig_mat is discarded below; read-only use. */
  double *cb_ij,*c2psi_ij,*s2psi_ij;
  npix_tot = npix_temp+2*npix_pol; // Linear size of covariance matrix, size of noisevar array
  npix_sum = npix_temp+npix_pol; // Linear size of angle matrices
  cb_ij = trig_mat;
  c2psi_ij = trig_mat + npix_sum*npix_sum;
  s2psi_ij = trig_mat + 2*npix_sum*npix_sum;
  MALLOC_IF_NEEDED(covmat,orig,_SZT_(npix_tot)*_SZT_(npix_tot)*sizeof(double),err);
  forwardError(*err,__LINE__,NULL);
  memset((void*)covmat,0,npix_tot*npix_tot*sizeof(double));
  /* debug dumps of the six input spectra, kept for reference:
  _DEBUGHERE_("TT %g %g %g %g %g %g",qtt[0],qtt[1],qtt[2],qtt[3],qtt[4],qtt[5]);
  _DEBUGHERE_("EE %g %g %g %g %g %g",qee[0],qee[1],qee[2],qee[3],qee[4],qee[5])
  _DEBUGHERE_("BB %g %g %g %g %g %g",qbb[0],qbb[1],qbb[2],qbb[3],qbb[4],qbb[5])
  _DEBUGHERE_("TE %g %g %g %g %g %g",qtt[0],qte[1],qte[2],qte[3],qte[4],qte[5])
  _DEBUGHERE_("TB %g %g %g %g %g %g",qtt[0],qtb[1],qtb[2],qtb[3],qtb[4],qtb[5])
  _DEBUGHERE_("EB %g %g %g %g %g %g",qeb[0],qeb[1],qeb[2],qeb[3],qeb[4],qeb[5])*/
  // Compute blocks separately now
  // Temperature block
  //_DEBUGHERE_("omp T","");
  #pragma omp parallel for default (shared) private (i,j,cb,covTT)
  for (i=0;i<npix_temp;i++) {
    for (j=0;j<npix_temp;j++) {
      cb = cb_ij[i*npix_sum+j];
      covTT = scalCov(cb,lmax,qtt);
      if ((i==j) && (noisevar!=NULL)) {
        covTT += noisevar[i];
      }
      covmat[i*npix_tot+j] = covTT;
    }
  }
  // TQ and TU blocks
  //_DEBUGHERE_("omp PT","");
  #pragma omp parallel for default (shared) private (i,j,cb,c2g,s2g,covcross)
  for (i=0;i<npix_temp;i++) {
    for (j=0;j<npix_pol;j++) {
      cb = cb_ij[i*npix_sum+npix_temp+j];
      c2g = c2psi_ij[(j+npix_temp)*npix_sum+i];
      s2g = s2psi_ij[(j+npix_temp)*npix_sum+i];
      //_DEBUGHERE_("%g %g %g",cb,c2g,s2g);
      scalCovCross(covcross,cb,c2g,s2g,lmax,qte,qtb);
      // PATCH: TAKE -transpose(TQ,TU): needs to find why !!! //
      //_DEBUGHERE_("%g %g",covcross[0],covcross[1]);
      covmat[(j+npix_temp)*npix_tot+i]=-covcross[0]; //TQ lower
      covmat[i*npix_tot+npix_temp+j]=-covcross[0]; //TQ upper
      covmat[(j+npix_temp+npix_pol)*npix_tot+i]=-covcross[1]; //TU lower
      covmat[i*npix_tot+npix_temp+npix_pol+j]=-covcross[1]; //TU upper
    }
  }
  // QQ, UU and QU blocks
  //_DEBUGHERE_("omp P","");
  #pragma omp parallel for default (shared) private (i,j,cb,c2a,s2a,c2g,s2g,covQU)
  for (i=0;i<npix_pol;i++) {
    for (j=0;j<npix_pol; j++) {
      cb = cb_ij[(i+npix_temp)*npix_sum+npix_temp+j];
      c2a = c2psi_ij[(i+npix_temp)*npix_sum+npix_temp+j];
      s2a = s2psi_ij[(i+npix_temp)*npix_sum+npix_temp+j];
      c2g = c2psi_ij[(j+npix_temp)*npix_sum+npix_temp+i];
      s2g = s2psi_ij[(j+npix_temp)*npix_sum+npix_temp+i];
      scalCovQU(covQU,cb,c2a,s2a,c2g,s2g,lmax,qee,qbb,qeb);
      if ((i==j) &&(noisevar!=NULL)) {//add noise, assumes diag noise maps for TT, QQ, UU
        covQU[0] += noisevar[i+npix_temp];
        covQU[2] += noisevar[i+npix_temp+npix_pol];
      }
      covmat[(i+npix_temp)*npix_tot+npix_temp+j]=covQU[0]; // QQ
      covmat[(i+npix_temp+npix_pol)*npix_tot+npix_temp+npix_pol+j] = covQU[2]; // UU
      covmat[(i+npix_temp+npix_pol)*npix_tot+npix_temp+j] = covQU[1]; // QU lower
      covmat[(j+npix_temp)*npix_tot+npix_temp+npix_pol+i] = covQU[1]; // QU upper (comment fixed: was labelled 'lower' twice)
    }
  }
  return covmat;
}

#undef __FUNC__
#define __FUNC__ "init_plowly"
plowly *init_plowly(int nside,char* ordering, unsigned char *mask_T, unsigned char *mask_P, double *mapT,double *mapQ, double *mapU, double *Ndiag, double* N,int reduced, long lmax,int nell, int *ell, double *Cl,int *has_cl, error **err) { plowly *self; int order; long *pix_T,*pix_P; long npix_t,npix_p; int i,j,ox,oy,offset; self = malloc_err(sizeof(plowly), err); forwardError(*err,__LINE__,NULL); SET_PRINT_STAT(self); self->nside=nside; order = lowly_which_order(ordering,err); forwardError(*err,__LINE__,NULL); npix_t = 12*nside*nside; pix_T = lowly_build_pixel_list(mask_T, &npix_t, err); forwardError(*err,__LINE__,NULL); if (mask_P==NULL || mapQ==NULL || mapU==NULL) { pix_P = NULL; npix_p = 0; } else { npix_p = 12*nside*nside; pix_P = lowly_build_pixel_list(mask_P, &npix_p, err); forwardError(*err,__LINE__,NULL); } self->npix_p = npix_p; self->npix_t = npix_t; self->npix = 2*npix_p + npix_t; _DEBUGHERE_("%d %d",npix_t,npix_p); self->trig_mat = build_trig_mat_general_plist(self->nside,pix_T,npix_t,pix_P,npix_p,order,err); forwardError(*err,__LINE__,NULL); self->buffer = malloc_err(sizeof(double)*(_SZT_(self->npix) * _SZT_(self->npix) + self->npix*2),err); forwardError(*err,__LINE__,NULL); self->S = self->buffer; self->X = self->S + self->npix*self->npix; self->X_temp = self->X + self->npix; _DEBUGHERE_("%g",mapT[0]); for(i=0;i<self->npix_t;i++) { self->X[i] = mapT[pix_T[i]]; } if (mapQ!=NULL) _DEBUGHERE_("%g",mapQ[0]); for(i=0;i<self->npix_p;i++) { self->X[i+self->npix_t] = mapQ[pix_P[i]]; } if (mapU!=NULL) _DEBUGHERE_("%g",mapU[0]); for(i=0;i<self->npix_p;i++) { self->X[i+self->npix_t+self->npix_p] = mapU[pix_P[i]]; } self->Cl = malloc_err(sizeof(double)*(lmax+1)*2*6,err); forwardError(*err,__LINE__,NULL); memcpy(self->Cl,Cl,sizeof(double)*(lmax+1)*6); self->tCl = self->Cl + (lmax+1)*6; self->nell = nell; self->ell = lowly_get_ell(&(self->nell),ell,lmax,err); forwardError(*err,__LINE__,NULL); self->lmax= lmax; 
lowly_get_offset_cl(has_cl,self->offset_cl,self->nell); /*_DEBUGHERE_("%d %d %d %d %d %d %d",self->nell,self->offset_cl[0],self->offset_cl[1],self->offset_cl[2],self->offset_cl[3],self->offset_cl[4],self->offset_cl[5]); _DEBUGHERE_("TT %g %g %g %g %g %g",self->Cl[0+self->offset_cl[0]],self->Cl[1+self->offset_cl[0]],self->Cl[2+self->offset_cl[0]],self->Cl[3+self->offset_cl[0]],self->Cl[4+self->offset_cl[0]],self->Cl[5+self->offset_cl[0]]); _DEBUGHERE_("EE %g %g %g %g %g %g",self->Cl[0+self->offset_cl[1]],self->Cl[1+self->offset_cl[1]],self->Cl[2+self->offset_cl[1]],self->Cl[3+self->offset_cl[1]],self->Cl[4+self->offset_cl[1]],self->Cl[5+self->offset_cl[1]]); _DEBUGHERE_("BB %g %g %g %g %g %g",self->Cl[0+self->offset_cl[2]],self->Cl[1+self->offset_cl[2]],self->Cl[2+self->offset_cl[2]],self->Cl[3+self->offset_cl[2]],self->Cl[4+self->offset_cl[2]],self->Cl[5+self->offset_cl[2]]); _DEBUGHERE_("TE %g %g %g %g %g %g",self->Cl[0+self->offset_cl[3]],self->Cl[1+self->offset_cl[3]],self->Cl[2+self->offset_cl[3]],self->Cl[3+self->offset_cl[3]],self->Cl[4+self->offset_cl[3]],self->Cl[5+self->offset_cl[3]]); _DEBUGHERE_("TB %g %g %g %g %g %g",self->Cl[0+self->offset_cl[4]],self->Cl[1+self->offset_cl[4]],self->Cl[2+self->offset_cl[4]],self->Cl[3+self->offset_cl[4]],self->Cl[4+self->offset_cl[4]],self->Cl[5+self->offset_cl[4]]); _DEBUGHERE_("EB %g %g %g %g %g %g",self->Cl[0+self->offset_cl[5]],self->Cl[1+self->offset_cl[5]],self->Cl[2+self->offset_cl[5]],self->Cl[3+self->offset_cl[5]],self->Cl[4+self->offset_cl[5]],self->Cl[5+self->offset_cl[5]]); */ if (Ndiag!=NULL) { self->Ndiag = malloc_err(sizeof(double)*self->npix,err); forwardError(*err,__LINE__,NULL); self->N=NULL; if(reduced==1) { memcpy(self->Ndiag, Ndiag, sizeof(double)*self->npix); } else { for(i=0;i<npix_t;i++) { self->Ndiag[i]=Ndiag[pix_T[i]]; } for(i=0;i<npix_p;i++) { self->Ndiag[npix_t+i]=Ndiag[12*nside*nside+pix_P[i]]; } for(i=0;i<npix_p;i++) { self->Ndiag[npix_p+npix_t+i]=Ndiag[12*nside*nside*2+pix_P[i]]; } } } 
else { self->N = malloc_err(sizeof(double)*_SZT_(self->npix)*_SZT_(self->npix),err); forwardError(*err,__LINE__,NULL); self->Ndiag=NULL; if(reduced==1) { memcpy(self->N, N, sizeof(double)*_SZT_(self->npix)*_SZT_(self->npix)); } else { for(i=0;i<self->npix;i++) { if(i<self->npix_t) { ox = pix_T[i]; } else if(i<self->npix_t+self->npix_p) { ox = pix_P[i-self->npix_t] + 12*nside*nside; } else { ox = pix_P[i-self->npix_t-self->npix_p] + 12*nside*nside*2; } for(j=0;j<self->npix;j++) { if(j<self->npix_t) { oy = pix_T[j]; } else if(j<self->npix_t+self->npix_p) { oy = pix_P[j-self->npix_t] + 12*nside*nside; } else { oy = pix_P[j-self->npix_t-self->npix_p] + 12*nside*nside*2; } self->N[i*self->npix+j] = N[ox*12*nside*nside*3+oy]; } } } } free(pix_T); if (mask_P==NULL) { free(pix_P); } self->time_build = 0; self->time_tot = 0; self->time_chol = 0; self->n_tests = 0; return self; } #undef __FUNC__ #define __FUNC__ "plowly_build_S" double* plowly_build_S(double* orig,plowly *self,double* pars,error **err) { int i,j,ioff,ilm,jls,lmp1; double *res; lmp1 = (self->lmax+1); // prepare Cls memcpy(self->tCl, self->Cl, lmp1*6); for(i=0;i<6;i++) { if (self->offset_cl[i]!=-1) { ilm = i*lmp1; ioff = self->offset_cl[i]; for(j=0;j<self->nell;j++) { jls = self->ell[j]; self->tCl[ilm+jls]=pars[ioff+j]; } } } //build cov matrice res=build_cov_matrix_pol_general(orig,self->trig_mat,self->Ndiag, self->npix_t, self->npix_p,self->lmax, self->tCl+ 0*lmp1,self->tCl + 1*lmp1, self->tCl+ 2*lmp1,self->tCl + 3*lmp1, self->tCl+ 4*lmp1,self->tCl + 5*lmp1, err); forwardError(*err,__LINE__,NULL); // add noise if needed if (self->N!=NULL) { int sz,one; double fone; sz=self->npix*self->npix; one=1; fone=1; daxpy(&sz, &fone, self->N, &one, res, &one); } return res; } #undef __FUNC__ #define __FUNC__ "plowly_lkl" double plowly_lkl(void* pself, double* pars, error **err) { plowly* self; int tot_time,i,j; double res; TIMER_DEFS; self=pself; tot_time=0; // build S TIMER_IN; plowly_build_S(self->S,self,pars,err); 
forwardError(*err,__LINE__,0); TIMER_OUT; tot_time+=TIMER_MSEC; self->time_build += TIMER_MSEC; // compute lkl; TIMER_IN; res = lowly_XtRX_lkl(self->S,self->X,self->X_temp,self->npix,err); forwardError(*err,__LINE__,0); TIMER_OUT; tot_time+=TIMER_MSEC; self->time_chol += TIMER_MSEC; tot_time+=TIMER_MSEC; self->time_tot += tot_time; self->n_tests++; return res; } #undef __FUNC__ #define __FUNC__ "free_plowly" void free_plowly(void **pself) { plowly *self; self=*pself; DO_PRINT_STAT(self); free(self->trig_mat); free(self->buffer); free(self->ell); free(self->Cl); if (self->N!=NULL) { free(self->N); } else { free(self->Ndiag); } free(self); *pself=NULL; }
convolution_3x3_pack4.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd64_transform_kernel_pack4_msa(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt) { // winograd63 transform kernel Mat kernel_tm; kernel_tm.create(8 * 8, inch, outch); const float ktm[8][3] = { {1.0f, 0.0f, 0.0f}, {-2.0f / 9, -2.0f / 9, -2.0f / 9}, {-2.0f / 9, 2.0f / 9, -2.0f / 9}, {1.0f / 90, 1.0f / 45, 2.0f / 45}, {1.0f / 90, -1.0f / 45, 2.0f / 45}, {1.0f / 45, 1.0f / 90, 1.0f / 180}, {1.0f / 45, -1.0f / 90, 1.0f / 180}, {0.0f, 0.0f, 1.0f} }; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel, transposed const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[8][3]; for (int i = 0; i < 8; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // v for (int j = 0; j < 8; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 8; i++) { kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * 
ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } // interleave // src = 64-inch-outch // dst = pb-pa-inch/pa-64-outch/pb kernel_tm_pack4.create(inch / 4, 64, outch / 4, (size_t)4u * 4 * 4, 4 * 4); for (int q = 0; q + (4 - 1) < outch; q += 4) { Mat g0 = kernel_tm_pack4.channel(q / 4); for (int k = 0; k < 64; k++) { float* g00 = g0.row<float>(k); for (int p = 0; p + (4 - 1) < inch; p += 4) { for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { const float* k00 = kernel_tm.channel(q + j).row(p + i); g00[0] = (float)k00[k]; g00++; } } } } } } static void conv3x3s1_winograd64_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 6n+2 Mat bottom_blob_bordered = bottom_blob; outw = (outw + 5) / 6 * 6; outh = (outh + 5) / 6 * 6; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt); const float* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); bottom_blob_tm.create(tiles, 64, inch, 4u * elempack, elempack, opt.workspace_allocator); // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 
0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[8][8][4]; v4f32 _v5_25 = __msa_fill_w_f32(5.25f); v4f32 _vm4_25 = __msa_fill_w_f32(-4.25f); v4f32 _vm1_25 = __msa_fill_w_f32(-1.25f); v4f32 _v0_25 = __msa_fill_w_f32(0.25f); v4f32 _vm2_5 = __msa_fill_w_f32(-2.5f); v4f32 _v0_5 = __msa_fill_w_f32(0.5f); v4f32 _v2 = __msa_fill_w_f32(2.f); v4f32 _v4 = __msa_fill_w_f32(4.f); // tile for (int i = 0; i < h_tm / 8; i++) { for (int j = 0; j < w_tm / 8; j++) { const float* r0 = img0.row(i * 6) + (j * 6) * 4; for (int m = 0; m < 8; m++) { v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0); v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0); v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0); v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0); v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0); v4f32 _r05 = (v4f32)__msa_ld_w(r0 + 4 * 5, 0); v4f32 _r06 = (v4f32)__msa_ld_w(r0 + 4 * 6, 0); v4f32 _r07 = (v4f32)__msa_ld_w(r0 + 4 * 7, 0); v4f32 _tmp0m = __msa_fmadd_w(__msa_fsub_w(_r00, _r06), _v5_25, __msa_fsub_w(_r04, _r02)); v4f32 _tmp7m = __msa_fmadd_w(__msa_fsub_w(_r07, _r01), _v5_25, __msa_fsub_w(_r03, _r05)); __msa_st_w((v4i32)_tmp0m, tmp[0][m], 0); __msa_st_w((v4i32)_tmp7m, tmp[7][m], 0); v4f32 _tmp12a = __msa_fmadd_w(__msa_fadd_w(_r02, _r06), _vm4_25, _r04); v4f32 _tmp12b = __msa_fmadd_w(__msa_fadd_w(_r01, _r05), _vm4_25, 
_r03); v4f32 _tmp1m = __msa_fadd_w(_tmp12a, _tmp12b); v4f32 _tmp2m = __msa_fsub_w(_tmp12a, _tmp12b); __msa_st_w((v4i32)_tmp1m, tmp[1][m], 0); __msa_st_w((v4i32)_tmp2m, tmp[2][m], 0); v4f32 _tmp34a = __msa_fmadd_w(__msa_fmadd_w(_r06, _v0_25, _r02), _vm1_25, _r04); v4f32 _tmp34b = __msa_fmadd_w(__msa_fmadd_w(__msa_fmul_w(_r01, _v0_5), _vm2_5, _r03), _v2, _r05); v4f32 _tmp3m = __msa_fadd_w(_tmp34a, _tmp34b); v4f32 _tmp4m = __msa_fsub_w(_tmp34a, _tmp34b); __msa_st_w((v4i32)_tmp3m, tmp[3][m], 0); __msa_st_w((v4i32)_tmp4m, tmp[4][m], 0); v4f32 _tmp56a = __msa_fmadd_w(_r06, _v4, __msa_fmadd_w(_r02, _vm1_25, _r04)); v4f32 _tmp56b = __msa_fmadd_w(__msa_fmadd_w(__msa_fmul_w(_r01, _v2), _vm2_5, _r03), _v0_5, _r05); v4f32 _tmp5m = __msa_fadd_w(_tmp56a, _tmp56b); v4f32 _tmp6m = __msa_fsub_w(_tmp56a, _tmp56b); __msa_st_w((v4i32)_tmp5m, tmp[5][m], 0); __msa_st_w((v4i32)_tmp6m, tmp[6][m], 0); r0 += w * 4; } float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 8 + j) * 4; float* r0_tm_1 = r0_tm_0 + tiles * 4; float* r0_tm_2 = r0_tm_0 + tiles * 4 * 2; float* r0_tm_3 = r0_tm_0 + tiles * 4 * 3; float* r0_tm_4 = r0_tm_0 + tiles * 4 * 4; float* r0_tm_5 = r0_tm_0 + tiles * 4 * 5; float* r0_tm_6 = r0_tm_0 + tiles * 4 * 6; float* r0_tm_7 = r0_tm_0 + tiles * 4 * 7; for (int m = 0; m < 8; m++) { v4f32 _tmp00 = (v4f32)__msa_ld_w(tmp[m][0], 0); v4f32 _tmp01 = (v4f32)__msa_ld_w(tmp[m][1], 0); v4f32 _tmp02 = (v4f32)__msa_ld_w(tmp[m][2], 0); v4f32 _tmp03 = (v4f32)__msa_ld_w(tmp[m][3], 0); v4f32 _tmp04 = (v4f32)__msa_ld_w(tmp[m][4], 0); v4f32 _tmp05 = (v4f32)__msa_ld_w(tmp[m][5], 0); v4f32 _tmp06 = (v4f32)__msa_ld_w(tmp[m][6], 0); v4f32 _tmp07 = (v4f32)__msa_ld_w(tmp[m][7], 0); v4f32 _r0tm0 = __msa_fmadd_w(__msa_fsub_w(_tmp00, _tmp06), _v5_25, __msa_fsub_w(_tmp04, _tmp02)); v4f32 _r0tm7 = __msa_fmadd_w(__msa_fsub_w(_tmp07, _tmp01), _v5_25, __msa_fsub_w(_tmp03, _tmp05)); v4f32 _tmp12a = __msa_fmadd_w(__msa_fadd_w(_tmp02, _tmp06), _vm4_25, _tmp04); v4f32 _tmp12b = __msa_fmadd_w(__msa_fadd_w(_tmp01, 
_tmp05), _vm4_25, _tmp03); v4f32 _r0tm1 = __msa_fadd_w(_tmp12a, _tmp12b); v4f32 _r0tm2 = __msa_fsub_w(_tmp12a, _tmp12b); v4f32 _tmp34a = __msa_fmadd_w(__msa_fmadd_w(_tmp06, _v0_25, _tmp02), _vm1_25, _tmp04); v4f32 _tmp34b = __msa_fmadd_w(__msa_fmadd_w(__msa_fmul_w(_tmp01, _v0_5), _vm2_5, _tmp03), _v2, _tmp05); v4f32 _r0tm3 = __msa_fadd_w(_tmp34a, _tmp34b); v4f32 _r0tm4 = __msa_fsub_w(_tmp34a, _tmp34b); v4f32 _tmp56a = __msa_fmadd_w(_tmp06, _v4, __msa_fmadd_w(_tmp02, _vm1_25, _tmp04)); v4f32 _tmp56b = __msa_fmadd_w(__msa_fmadd_w(__msa_fmul_w(_tmp01, _v2), _vm2_5, _tmp03), _v0_5, _tmp05); v4f32 _r0tm5 = __msa_fadd_w(_tmp56a, _tmp56b); v4f32 _r0tm6 = __msa_fsub_w(_tmp56a, _tmp56b); __msa_st_w((v4i32)_r0tm0, r0_tm_0, 0); __msa_st_w((v4i32)_r0tm1, r0_tm_1, 0); __msa_st_w((v4i32)_r0tm2, r0_tm_2, 0); __msa_st_w((v4i32)_r0tm3, r0_tm_3, 0); __msa_st_w((v4i32)_r0tm4, r0_tm_4, 0); __msa_st_w((v4i32)_r0tm5, r0_tm_5, 0); __msa_st_w((v4i32)_r0tm6, r0_tm_6, 0); __msa_st_w((v4i32)_r0tm7, r0_tm_7, 0); r0_tm_0 += tiles * 4 * 8; r0_tm_1 += tiles * 4 * 8; r0_tm_2 += tiles * 4 * 8; r0_tm_3 += tiles * 4 * 8; r0_tm_4 += tiles * 4 * 8; r0_tm_5 += tiles * 4 * 8; r0_tm_6 += tiles * 4 * 8; r0_tm_7 += tiles * 4 * 8; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = h_tm / 8 * w_tm / 8; // permute // bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; if (tiles >= 12) bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 64, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, 4u * elempack, 
elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, 4u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 64, 4u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 64; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 11 < tiles; i += 12) { float* tmpptr = tm2.row(i / 12); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x8 v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0); v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0); v4f32 _r2 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0); v4f32 _r3 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0); v4f32 _r4 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0); v4f32 _r5 = (v4f32)__msa_ld_w(r0 + 4 * 5, 0); v4f32 _r6 = (v4f32)__msa_ld_w(r0 + 4 * 6, 0); v4f32 _r7 = (v4f32)__msa_ld_w(r0 + 4 * 7, 0); v4f32 _r8 = (v4f32)__msa_ld_w(r0 + 4 * 8, 0); v4f32 _r9 = (v4f32)__msa_ld_w(r0 + 4 * 9, 0); v4f32 _ra = (v4f32)__msa_ld_w(r0 + 4 * 10, 0); v4f32 _rb = (v4f32)__msa_ld_w(r0 + 4 * 11, 0); v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0); v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0); v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2); v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2); v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4); v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4); v4i32 _r67r = __msa_ilvr_w((v4i32)_r7, (v4i32)_r6); v4i32 _r67l = __msa_ilvl_w((v4i32)_r7, (v4i32)_r6); v4i32 _r89r = __msa_ilvr_w((v4i32)_r9, (v4i32)_r8); v4i32 _r89l = __msa_ilvl_w((v4i32)_r9, (v4i32)_r8); v4i32 _rabr = __msa_ilvr_w((v4i32)_rb, (v4i32)_ra); v4i32 _rabl = __msa_ilvl_w((v4i32)_rb, (v4i32)_ra); v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r); v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r); v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l); v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, 
(v2i64)_r01l); v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r); v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r); v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l); v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l); v2i64 _r89ab_0 = __msa_ilvr_d((v2i64)_rabr, (v2i64)_r89r); v2i64 _r89ab_1 = __msa_ilvl_d((v2i64)_rabr, (v2i64)_r89r); v2i64 _r89ab_2 = __msa_ilvr_d((v2i64)_rabl, (v2i64)_r89l); v2i64 _r89ab_3 = __msa_ilvl_d((v2i64)_rabl, (v2i64)_r89l); __msa_st_w((v4i32)_r0123_0, tmpptr, 0); __msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0); __msa_st_w((v4i32)_r89ab_0, tmpptr + 4 * 2, 0); __msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 3, 0); __msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 4, 0); __msa_st_w((v4i32)_r89ab_1, tmpptr + 4 * 5, 0); __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 6, 0); __msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 7, 0); __msa_st_w((v4i32)_r89ab_2, tmpptr + 4 * 8, 0); __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 9, 0); __msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 10, 0); __msa_st_w((v4i32)_r89ab_3, tmpptr + 4 * 11, 0); r0 += bottom_blob_tm.cstep * 4; tmpptr += 48; } } for (; i + 7 < tiles; i += 8) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x8 v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0); v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0); v4f32 _r2 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0); v4f32 _r3 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0); v4f32 _r4 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0); v4f32 _r5 = (v4f32)__msa_ld_w(r0 + 4 * 5, 0); v4f32 _r6 = (v4f32)__msa_ld_w(r0 + 4 * 6, 0); v4f32 _r7 = (v4f32)__msa_ld_w(r0 + 4 * 7, 0); v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0); v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0); v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2); v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2); v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4); v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4); v4i32 _r67r = 
__msa_ilvr_w((v4i32)_r7, (v4i32)_r6); v4i32 _r67l = __msa_ilvl_w((v4i32)_r7, (v4i32)_r6); v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r); v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r); v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l); v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l); v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r); v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r); v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l); v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l); __msa_st_w((v4i32)_r0123_0, tmpptr, 0); __msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0); __msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 2, 0); __msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 3, 0); __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 4, 0); __msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 5, 0); __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 6, 0); __msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 7, 0); r0 += bottom_blob_tm.cstep * 4; tmpptr += 32; } } for (; i + 3 < tiles; i += 4) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x4 v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0); v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0); v4f32 _r2 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0); v4f32 _r3 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0); v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0); v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0); v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2); v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2); v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r); v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r); v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l); v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l); __msa_st_w((v4i32)_r0123_0, tmpptr, 0); __msa_st_w((v4i32)_r0123_1, tmpptr + 4, 0); __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 2, 0); __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 3, 0); r0 += 
bottom_blob_tm.cstep * 4; tmpptr += 16; } } for (; i + 1 < tiles; i += 2) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x2 v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0); v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0); v4i32 _r01_0 = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0); v4i32 _r01_1 = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0); __msa_st_w((v4i32)_r01_0, tmpptr, 0); __msa_st_w((v4i32)_r01_1, tmpptr + 4, 0); r0 += bottom_blob_tm.cstep * 4; tmpptr += 8; } } for (; i < tiles; i++) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { v4f32 _val = (v4f32)__msa_ld_w(r0, 0); __msa_st_w((v4i32)_val, tmpptr, 0); r0 += bottom_blob_tm.cstep * 4; tmpptr += 4; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 64, outch, 4u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); for (int r = 0; r < 64; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 v4f32 _sum0 = (v4f32)__msa_fill_w(0); v4f32 _sum1 = (v4f32)__msa_fill_w(0); v4f32 _sum2 = (v4f32)__msa_fill_w(0); v4f32 _sum3 = (v4f32)__msa_fill_w(0); v4f32 _sum4 = (v4f32)__msa_fill_w(0); v4f32 _sum5 = (v4f32)__msa_fill_w(0); v4f32 _sum6 = (v4f32)__msa_fill_w(0); v4f32 _sum7 = (v4f32)__msa_fill_w(0); v4f32 _sum8 = (v4f32)__msa_fill_w(0); v4f32 _sum9 = (v4f32)__msa_fill_w(0); v4f32 _suma = (v4f32)__msa_fill_w(0); v4f32 _sumb = (v4f32)__msa_fill_w(0); for (int j = 0; j < nn; j++) { __builtin_prefetch(r0 + 96); 
__builtin_prefetch(k0 + 32); v4i32 _val0123 = __msa_ld_w(r0, 0); v4i32 _val4567 = __msa_ld_w(r0 + 4, 0); v4i32 _val89ab = __msa_ld_w(r0 + 8, 0); v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0); _sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0); _sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0); _sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0); _sum4 = __msa_fmadd_w(_sum4, (v4f32)__msa_splati_w(_val4567, 0), _w0); _sum5 = __msa_fmadd_w(_sum5, (v4f32)__msa_splati_w(_val4567, 1), _w0); _sum6 = __msa_fmadd_w(_sum6, (v4f32)__msa_splati_w(_val4567, 2), _w0); _sum7 = __msa_fmadd_w(_sum7, (v4f32)__msa_splati_w(_val4567, 3), _w0); _sum8 = __msa_fmadd_w(_sum8, (v4f32)__msa_splati_w(_val89ab, 0), _w0); _sum9 = __msa_fmadd_w(_sum9, (v4f32)__msa_splati_w(_val89ab, 1), _w0); _suma = __msa_fmadd_w(_suma, (v4f32)__msa_splati_w(_val89ab, 2), _w0); _sumb = __msa_fmadd_w(_sumb, (v4f32)__msa_splati_w(_val89ab, 3), _w0); r0 += 12; k0 += 4; } __msa_st_w((v4i32)_sum0, output0_tm, 0); __msa_st_w((v4i32)_sum1, output0_tm + 4, 0); __msa_st_w((v4i32)_sum2, output0_tm + 4 * 2, 0); __msa_st_w((v4i32)_sum3, output0_tm + 4 * 3, 0); __msa_st_w((v4i32)_sum4, output0_tm + 4 * 4, 0); __msa_st_w((v4i32)_sum5, output0_tm + 4 * 5, 0); __msa_st_w((v4i32)_sum6, output0_tm + 4 * 6, 0); __msa_st_w((v4i32)_sum7, output0_tm + 4 * 7, 0); __msa_st_w((v4i32)_sum8, output0_tm + 4 * 8, 0); __msa_st_w((v4i32)_sum9, output0_tm + 4 * 9, 0); __msa_st_w((v4i32)_suma, output0_tm + 4 * 10, 0); __msa_st_w((v4i32)_sumb, output0_tm + 4 * 11, 0); output0_tm += 4 * 12; } for (; i + 7 < tiles; i += 8) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 v4f32 _sum0 = (v4f32)__msa_fill_w(0); v4f32 _sum1 = (v4f32)__msa_fill_w(0); v4f32 _sum2 = (v4f32)__msa_fill_w(0); v4f32 _sum3 = (v4f32)__msa_fill_w(0); v4f32 _sum4 = 
(v4f32)__msa_fill_w(0); v4f32 _sum5 = (v4f32)__msa_fill_w(0); v4f32 _sum6 = (v4f32)__msa_fill_w(0); v4f32 _sum7 = (v4f32)__msa_fill_w(0); for (int j = 0; j < nn; j++) { __builtin_prefetch(r0 + 64); __builtin_prefetch(k0 + 32); v4i32 _val0123 = __msa_ld_w(r0, 0); v4i32 _val4567 = __msa_ld_w(r0 + 4, 0); v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0); _sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0); _sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0); _sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0); _sum4 = __msa_fmadd_w(_sum4, (v4f32)__msa_splati_w(_val4567, 0), _w0); _sum5 = __msa_fmadd_w(_sum5, (v4f32)__msa_splati_w(_val4567, 1), _w0); _sum6 = __msa_fmadd_w(_sum6, (v4f32)__msa_splati_w(_val4567, 2), _w0); _sum7 = __msa_fmadd_w(_sum7, (v4f32)__msa_splati_w(_val4567, 3), _w0); r0 += 8; k0 += 4; } __msa_st_w((v4i32)_sum0, output0_tm, 0); __msa_st_w((v4i32)_sum1, output0_tm + 4, 0); __msa_st_w((v4i32)_sum2, output0_tm + 4 * 2, 0); __msa_st_w((v4i32)_sum3, output0_tm + 4 * 3, 0); __msa_st_w((v4i32)_sum4, output0_tm + 4 * 4, 0); __msa_st_w((v4i32)_sum5, output0_tm + 4 * 5, 0); __msa_st_w((v4i32)_sum6, output0_tm + 4 * 6, 0); __msa_st_w((v4i32)_sum7, output0_tm + 4 * 7, 0); output0_tm += 4 * 8; } for (; i + 3 < tiles; i += 4) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 v4f32 _sum0 = (v4f32)__msa_fill_w(0); v4f32 _sum1 = (v4f32)__msa_fill_w(0); v4f32 _sum2 = (v4f32)__msa_fill_w(0); v4f32 _sum3 = (v4f32)__msa_fill_w(0); for (int j = 0; j < nn; j++) { __builtin_prefetch(r0 + 32); __builtin_prefetch(k0 + 32); v4i32 _val0123 = __msa_ld_w(r0, 0); v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0); _sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0); _sum2 = __msa_fmadd_w(_sum2, 
(v4f32)__msa_splati_w(_val0123, 2), _w0); _sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0); r0 += 4; k0 += 4; } __msa_st_w((v4i32)_sum0, output0_tm, 0); __msa_st_w((v4i32)_sum1, output0_tm + 4, 0); __msa_st_w((v4i32)_sum2, output0_tm + 4 * 2, 0); __msa_st_w((v4i32)_sum3, output0_tm + 4 * 3, 0); output0_tm += 4 * 4; } for (; i + 1 < tiles; i += 2) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 v4f32 _sum0 = (v4f32)__msa_fill_w(0); v4f32 _sum1 = (v4f32)__msa_fill_w(0); for (int j = 0; j < nn; j++) { __builtin_prefetch(r0 + 16); __builtin_prefetch(k0 + 32); v4f32 _val0 = __msa_fill_w_f32(*r0++); v4f32 _val1 = __msa_fill_w_f32(*r0++); v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0); _sum0 = __msa_fmadd_w(_sum0, _val0, _w0); _sum1 = __msa_fmadd_w(_sum1, _val1, _w0); k0 += 4; } __msa_st_w((v4i32)_sum0, output0_tm, 0); __msa_st_w((v4i32)_sum1, output0_tm + 4, 0); output0_tm += 4 * 2; } for (; i < tiles; i++) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 v4f32 _sum = (v4f32)__msa_fill_w(0); for (int j = 0; j < nn; j++) { __builtin_prefetch(r0 + 8); __builtin_prefetch(k0 + 32); v4f32 _val0 = __msa_fill_w_f32(*r0++); v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0); _sum = __msa_fmadd_w(_sum, _val0, _w0); k0 += 4; } __msa_st_w((v4i32)_sum, output0_tm, 0); output0_tm += 4; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator); } { // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, 
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) int w_tm = outw / 6 * 8; int h_tm = outh / 6 * 8; const int tiles = w_tm / 8 * h_tm / 8; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); // const float bias0 = bias ? bias[p] : 0.f; v4f32 _bias0 = bias ? (v4f32)__msa_ld_w((const float*)bias + p * 4, 0) : (v4f32)__msa_fill_w(0); float tmp[6][8][4]; v4f32 _v32 = __msa_fill_w_f32(32.f); v4f32 _v16 = __msa_fill_w_f32(16.f); v4f32 _v8 = __msa_fill_w_f32(8.f); v4f32 _v4 = __msa_fill_w_f32(4.f); v4f32 _v2 = __msa_fill_w_f32(2.f); // tile for (int i = 0; i < outh / 6; i++) { for (int j = 0; j < outw / 6; j++) { // top_blob_tm.create(tiles, 64, outch, elemsize, elempack); const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 8 + j) * 4; const float* output0_tm_1 = output0_tm_0 + tiles * 4; const float* output0_tm_2 = output0_tm_0 + tiles * 4 * 2; const float* output0_tm_3 = output0_tm_0 + tiles * 4 * 3; const float* output0_tm_4 = output0_tm_0 + tiles * 4 * 4; const float* output0_tm_5 = output0_tm_0 + tiles * 4 * 5; const float* output0_tm_6 = output0_tm_0 + tiles * 4 * 6; const float* output0_tm_7 = output0_tm_0 + tiles * 4 * 7; float* output0 = out0.row<float>(i * 6) + (j * 6) * 4; // TODO msa optimize for (int m = 0; m < 8; m++) { v4f32 _out0tm0 = (v4f32)__msa_ld_w(output0_tm_0, 0); v4f32 _out0tm1 = (v4f32)__msa_ld_w(output0_tm_1, 0); v4f32 _out0tm2 = (v4f32)__msa_ld_w(output0_tm_2, 0); v4f32 _out0tm3 = 
(v4f32)__msa_ld_w(output0_tm_3, 0); v4f32 _out0tm4 = (v4f32)__msa_ld_w(output0_tm_4, 0); v4f32 _out0tm5 = (v4f32)__msa_ld_w(output0_tm_5, 0); v4f32 _out0tm6 = (v4f32)__msa_ld_w(output0_tm_6, 0); v4f32 _out0tm7 = (v4f32)__msa_ld_w(output0_tm_7, 0); v4f32 _tmp024a = __msa_fadd_w(_out0tm1, _out0tm2); v4f32 _tmp135a = __msa_fsub_w(_out0tm1, _out0tm2); v4f32 _tmp024b = __msa_fadd_w(_out0tm3, _out0tm4); v4f32 _tmp135b = __msa_fsub_w(_out0tm3, _out0tm4); v4f32 _tmp024c = __msa_fadd_w(_out0tm5, _out0tm6); v4f32 _tmp135c = __msa_fsub_w(_out0tm5, _out0tm6); v4f32 _tmp0m = __msa_fadd_w(__msa_fadd_w(_out0tm0, _tmp024a), __msa_fmadd_w(_tmp024b, _v32, _tmp024c)); v4f32 _tmp2m = __msa_fmadd_w(__msa_fmadd_w(_tmp024a, _v4, _tmp024b), _v8, _tmp024c); v4f32 _tmp4m = __msa_fmadd_w(__msa_fmadd_w(_tmp024a, _v16, _tmp024b), _v2, _tmp024c); __msa_st_w((v4i32)_tmp0m, tmp[0][m], 0); __msa_st_w((v4i32)_tmp2m, tmp[2][m], 0); __msa_st_w((v4i32)_tmp4m, tmp[4][m], 0); v4f32 _tmp1m = __msa_fmadd_w(__msa_fmadd_w(_tmp135a, _v2, _tmp135b), _v16, _tmp135c); v4f32 _tmp3m = __msa_fmadd_w(__msa_fmadd_w(_tmp135a, _v8, _tmp135b), _v4, _tmp135c); v4f32 _tmp5m = __msa_fadd_w(__msa_fadd_w(_out0tm7, _tmp135a), __msa_fmadd_w(_tmp135c, _v32, _tmp135b)); __msa_st_w((v4i32)_tmp1m, tmp[1][m], 0); __msa_st_w((v4i32)_tmp3m, tmp[3][m], 0); __msa_st_w((v4i32)_tmp5m, tmp[5][m], 0); output0_tm_0 += tiles * 4 * 8; output0_tm_1 += tiles * 4 * 8; output0_tm_2 += tiles * 4 * 8; output0_tm_3 += tiles * 4 * 8; output0_tm_4 += tiles * 4 * 8; output0_tm_5 += tiles * 4 * 8; output0_tm_6 += tiles * 4 * 8; output0_tm_7 += tiles * 4 * 8; } for (int m = 0; m < 6; m++) { v4f32 _tmp00 = (v4f32)__msa_ld_w(tmp[m][0], 0); v4f32 _tmp01 = (v4f32)__msa_ld_w(tmp[m][1], 0); v4f32 _tmp02 = (v4f32)__msa_ld_w(tmp[m][2], 0); v4f32 _tmp03 = (v4f32)__msa_ld_w(tmp[m][3], 0); v4f32 _tmp04 = (v4f32)__msa_ld_w(tmp[m][4], 0); v4f32 _tmp05 = (v4f32)__msa_ld_w(tmp[m][5], 0); v4f32 _tmp06 = (v4f32)__msa_ld_w(tmp[m][6], 0); v4f32 _tmp07 = 
// NOTE(review): this span starts mid-statement -- the expression below completes
// the `v4f32 _tmp07 = ...` load begun on the preceding line, inside the second
// (row-wise) pass of the winograd63 output transform.
(v4f32)__msa_ld_w(tmp[m][7], 0);

// Combine the eight taps tmp[m][0..7] into six output pixels per row,
// adding the per-channel bias vector _bias0 to each (see the commented
// otm[] table earlier in this function for the coefficient pattern).
v4f32 _tmp024a = __msa_fadd_w(_tmp01, _tmp02); // tmp1 + tmp2
v4f32 _tmp135a = __msa_fsub_w(_tmp01, _tmp02); // tmp1 - tmp2
v4f32 _tmp024b = __msa_fadd_w(_tmp03, _tmp04); // tmp3 + tmp4
v4f32 _tmp135b = __msa_fsub_w(_tmp03, _tmp04); // tmp3 - tmp4
v4f32 _tmp024c = __msa_fadd_w(_tmp05, _tmp06); // tmp5 + tmp6
v4f32 _tmp135c = __msa_fsub_w(_tmp05, _tmp06); // tmp5 - tmp6

// even outputs: 0, 2, 4
v4f32 _out00 = __msa_fadd_w(_bias0, __msa_fadd_w(__msa_fadd_w(_tmp00, _tmp024a), __msa_fmadd_w(_tmp024b, _v32, _tmp024c)));
v4f32 _out02 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fmadd_w(_tmp024a, _v4, _tmp024b), _v8, _tmp024c));
v4f32 _out04 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fmadd_w(_tmp024a, _v16, _tmp024b), _v2, _tmp024c));

__msa_st_w((v4i32)_out00, output0, 0);
__msa_st_w((v4i32)_out02, output0 + 4 * 2, 0);
__msa_st_w((v4i32)_out04, output0 + 4 * 4, 0);

// odd outputs: 1, 3, 5
v4f32 _out01 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fmadd_w(_tmp135a, _v2, _tmp135b), _v16, _tmp135c));
v4f32 _out03 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fmadd_w(_tmp135a, _v8, _tmp135b), _v4, _tmp135c));
v4f32 _out05 = __msa_fadd_w(_bias0, __msa_fadd_w(__msa_fadd_w(_tmp07, _tmp135a), __msa_fmadd_w(_tmp135c, _v32, _tmp135b)));

__msa_st_w((v4i32)_out01, output0 + 4, 0);
__msa_st_w((v4i32)_out03, output0 + 4 * 3, 0);
__msa_st_w((v4i32)_out05, output0 + 4 * 5, 0);

// advance one output row: outw pixels, 4 floats (pack4) each
output0 += outw * 4;
                    }
                }
            }
        }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}

/**
 * @brief Transform 3x3 convolution kernels into the 6x6 Winograd F(4x4,3x3)
 *        domain and interleave them for the pack4 MSA GEMM below.
 *
 * For every (outch p, inch q) pair the 3x3 kernel is expanded to a 6x6 tile by
 * applying the 6x3 matrix ktm on both sides, then the tiles are repacked from
 * 36-inch-outch order into pb-pa-inch/pa-36-outch/pb order (groups of 4 output
 * channels x 4 input channels interleaved per row).
 *
 * @param kernel          raw weights, read as outch*inch consecutive 3x3 blocks
 * @param kernel_tm_pack4 output: transformed and interleaved kernel (created here)
 * @param inch            input channel count; the interleave loops only cover
 *                        full groups of 4, so inch is assumed to be a multiple
 *                        of 4 -- TODO confirm against the caller
 * @param outch           output channel count; same multiple-of-4 assumption
 * @param opt             only opt.num_threads is used (OpenMP)
 */
static void conv3x3s1_winograd42_transform_kernel_pack4_msa(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt)
{
    // winograd42 transform kernel
    Mat kernel_tm(6 * 6, inch, outch);

    // Kernel-transform matrix (6 rows x 3 cols) of Winograd F(4x4,3x3).
    const float ktm[6][3] = {
        {1.0f / 4, 0.0f, 0.0f},
        {-1.0f / 6, -1.0f / 6, -1.0f / 6},
        {-1.0f / 6, 1.0f / 6, -1.0f / 6},
        {1.0f / 24, 1.0f / 12, 1.0f / 6},
        {1.0f / 24, -1.0f / 12, 1.0f / 6},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel
            const float* k0 = kernel0; // kernel row 0
            const float* k1 = kernel0 + 3; // kernel row 1
            const float* k2 = kernel0 + 6; // kernel row 2

            // h : first pass -- apply ktm along one axis, tmp is 6x3
            float tmp[6][3];
            for (int i = 0; i < 6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U : second pass -- apply ktm along the other axis, giving the
            // full 6x6 transformed tile stored into kernel_tm0
            for (int j = 0; j < 6; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i = 0; i < 6; i++)
                {
                    kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 36-inch-outch
    // dst = pb-pa-inch/pa-36-outch/pb
    kernel_tm_pack4.create(inch / 4, 36, outch / 4, (size_t)4u * 4 * 4, 4 * 4);

    for (int q = 0; q + (4 - 1) < outch; q += 4)
    {
        Mat g0 = kernel_tm_pack4.channel(q / 4);

        for (int k = 0; k < 36; k++)
        {
            float* g00 = g0.row<float>(k);

            for (int p = 0; p + (4 - 1) < inch; p += 4)
            {
                // innermost 4x4 group: 4 input channels x 4 output channels
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 4; j++)
                    {
                        const float* k00 = kernel_tm.channel(q + j).row(p + i);

                        g00[0] = (float)k00[k];

                        g00++;
                    }
                }
            }
        }
    }
}

// NOTE(review): only the beginning of this function is visible in this chunk;
// it continues past the end of the span.
static void conv3x3s1_winograd42_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 4n+2: round the output up to a multiple of the 4x4 tile, then
    // add the 2-pixel halo the 6x6 input tiles need
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 3) / 4 * 4;
    outh = (outh + 3) / 4 * 4;
    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    const float* bias = _bias;

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        const int tiles = w_tm / 6 * h_tm / 6;
bottom_blob_tm.create(tiles, 36, inch, 4u * elempack, elempack, opt.workspace_allocator); // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r04 + r03 // 2 = 4 * (r01 - r02) + r04 - r03 // 3 = -2 * (r01 - r03) + r04 - r02 // 4 = 2 * (r01 - r03) + r04 - r02 // 5 = 4 * r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); float tmp[6][6][4]; v4f32 _vm5 = __msa_fill_w_f32(-5.f); v4f32 _vm4 = __msa_fill_w_f32(-4.f); v4f32 _v4 = __msa_fill_w_f32(4.f); v4f32 _vm2 = __msa_fill_w_f32(-2.f); v4f32 _v2 = __msa_fill_w_f32(2.f); // tile for (int i = 0; i < h_tm / 6; i++) { for (int j = 0; j < w_tm / 6; j++) { const float* r0 = img0.row(i * 4) + (j * 4) * 4; for (int m = 0; m < 6; m++) { v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0); v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0); v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0); v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0); v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0); v4f32 _r05 = (v4f32)__msa_ld_w(r0 + 4 * 5, 0); v4f32 _tmp0m = __msa_fmadd_w(__msa_fmadd_w(_r04, _v4, _r00), _vm5, _r02); v4f32 _tmp1m = __msa_fmadd_w(__msa_fadd_w(_r04, _r03), _vm4, __msa_fadd_w(_r01, _r02)); v4f32 _tmp2m = __msa_fmadd_w(__msa_fsub_w(_r04, _r03), _v4, __msa_fsub_w(_r01, _r02)); v4f32 _tmp3m = __msa_fmadd_w(__msa_fsub_w(_r04, _r02), _vm2, __msa_fsub_w(_r01, _r03)); v4f32 _tmp4m = __msa_fmadd_w(__msa_fsub_w(_r04, _r02), _v2, __msa_fsub_w(_r01, _r03)); v4f32 _tmp5m = __msa_fmadd_w(__msa_fmadd_w(_r05, _v4, _r01), _vm5, _r03); __msa_st_w((v4i32)_tmp0m, tmp[0][m], 0); __msa_st_w((v4i32)_tmp1m, tmp[1][m], 0); __msa_st_w((v4i32)_tmp2m, 
tmp[2][m], 0); __msa_st_w((v4i32)_tmp3m, tmp[3][m], 0); __msa_st_w((v4i32)_tmp4m, tmp[4][m], 0); __msa_st_w((v4i32)_tmp5m, tmp[5][m], 0); r0 += w * 4; } float* r0_tm_0 = (float*)img0_tm + (i * w_tm / 6 + j) * 4; float* r0_tm_1 = r0_tm_0 + tiles * 4; float* r0_tm_2 = r0_tm_0 + tiles * 4 * 2; float* r0_tm_3 = r0_tm_0 + tiles * 4 * 3; float* r0_tm_4 = r0_tm_0 + tiles * 4 * 4; float* r0_tm_5 = r0_tm_0 + tiles * 4 * 5; for (int m = 0; m < 6; m++) { v4f32 _tmp00 = (v4f32)__msa_ld_w(tmp[m][0], 0); v4f32 _tmp01 = (v4f32)__msa_ld_w(tmp[m][1], 0); v4f32 _tmp02 = (v4f32)__msa_ld_w(tmp[m][2], 0); v4f32 _tmp03 = (v4f32)__msa_ld_w(tmp[m][3], 0); v4f32 _tmp04 = (v4f32)__msa_ld_w(tmp[m][4], 0); v4f32 _tmp05 = (v4f32)__msa_ld_w(tmp[m][5], 0); v4f32 _r0tm0 = __msa_fmadd_w(__msa_fmadd_w(_tmp04, _v4, _tmp00), _vm5, _tmp02); v4f32 _r0tm1 = __msa_fmadd_w(__msa_fadd_w(_tmp04, _tmp03), _vm4, __msa_fadd_w(_tmp01, _tmp02)); v4f32 _r0tm2 = __msa_fmadd_w(__msa_fsub_w(_tmp04, _tmp03), _v4, __msa_fsub_w(_tmp01, _tmp02)); v4f32 _r0tm3 = __msa_fmadd_w(__msa_fsub_w(_tmp04, _tmp02), _vm2, __msa_fsub_w(_tmp01, _tmp03)); v4f32 _r0tm4 = __msa_fmadd_w(__msa_fsub_w(_tmp04, _tmp02), _v2, __msa_fsub_w(_tmp01, _tmp03)); v4f32 _r0tm5 = __msa_fmadd_w(__msa_fmadd_w(_tmp05, _v4, _tmp01), _vm5, _tmp03); __msa_st_w((v4i32)_r0tm0, r0_tm_0, 0); __msa_st_w((v4i32)_r0tm1, r0_tm_1, 0); __msa_st_w((v4i32)_r0tm2, r0_tm_2, 0); __msa_st_w((v4i32)_r0tm3, r0_tm_3, 0); __msa_st_w((v4i32)_r0tm4, r0_tm_4, 0); __msa_st_w((v4i32)_r0tm5, r0_tm_5, 0); r0_tm_0 += tiles * 4 * 6; r0_tm_1 += tiles * 4 * 6; r0_tm_2 += tiles * 4 * 6; r0_tm_3 += tiles * 4 * 6; r0_tm_4 += tiles * 4 * 6; r0_tm_5 += tiles * 4 * 6; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = h_tm / 6 * w_tm / 6; // permute // bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; if (tiles >= 12) 
bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 36, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 2) bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, 4u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 36; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; for (; i + 11 < tiles; i += 12) { float* tmpptr = tm2.row(i / 12); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x8 v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0); v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0); v4f32 _r2 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0); v4f32 _r3 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0); v4f32 _r4 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0); v4f32 _r5 = (v4f32)__msa_ld_w(r0 + 4 * 5, 0); v4f32 _r6 = (v4f32)__msa_ld_w(r0 + 4 * 6, 0); v4f32 _r7 = (v4f32)__msa_ld_w(r0 + 4 * 7, 0); v4f32 _r8 = (v4f32)__msa_ld_w(r0 + 4 * 8, 0); v4f32 _r9 = (v4f32)__msa_ld_w(r0 + 4 * 9, 0); v4f32 _ra = (v4f32)__msa_ld_w(r0 + 4 * 10, 0); v4f32 _rb = (v4f32)__msa_ld_w(r0 + 4 * 11, 0); v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0); v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0); v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2); v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2); v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4); v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4); v4i32 _r67r = __msa_ilvr_w((v4i32)_r7, (v4i32)_r6); v4i32 _r67l = 
__msa_ilvl_w((v4i32)_r7, (v4i32)_r6); v4i32 _r89r = __msa_ilvr_w((v4i32)_r9, (v4i32)_r8); v4i32 _r89l = __msa_ilvl_w((v4i32)_r9, (v4i32)_r8); v4i32 _rabr = __msa_ilvr_w((v4i32)_rb, (v4i32)_ra); v4i32 _rabl = __msa_ilvl_w((v4i32)_rb, (v4i32)_ra); v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r); v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r); v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l); v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l); v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r); v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r); v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l); v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l); v2i64 _r89ab_0 = __msa_ilvr_d((v2i64)_rabr, (v2i64)_r89r); v2i64 _r89ab_1 = __msa_ilvl_d((v2i64)_rabr, (v2i64)_r89r); v2i64 _r89ab_2 = __msa_ilvr_d((v2i64)_rabl, (v2i64)_r89l); v2i64 _r89ab_3 = __msa_ilvl_d((v2i64)_rabl, (v2i64)_r89l); __msa_st_w((v4i32)_r0123_0, tmpptr, 0); __msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0); __msa_st_w((v4i32)_r89ab_0, tmpptr + 4 * 2, 0); __msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 3, 0); __msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 4, 0); __msa_st_w((v4i32)_r89ab_1, tmpptr + 4 * 5, 0); __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 6, 0); __msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 7, 0); __msa_st_w((v4i32)_r89ab_2, tmpptr + 4 * 8, 0); __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 9, 0); __msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 10, 0); __msa_st_w((v4i32)_r89ab_3, tmpptr + 4 * 11, 0); r0 += bottom_blob_tm.cstep * 4; tmpptr += 48; } } for (; i + 7 < tiles; i += 8) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x8 v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0); v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0); v4f32 _r2 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0); v4f32 _r3 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0); v4f32 _r4 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0); v4f32 _r5 = 
(v4f32)__msa_ld_w(r0 + 4 * 5, 0); v4f32 _r6 = (v4f32)__msa_ld_w(r0 + 4 * 6, 0); v4f32 _r7 = (v4f32)__msa_ld_w(r0 + 4 * 7, 0); v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0); v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0); v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2); v4i32 _r23l = __msa_ilvl_w((v4i32)_r3, (v4i32)_r2); v4i32 _r45r = __msa_ilvr_w((v4i32)_r5, (v4i32)_r4); v4i32 _r45l = __msa_ilvl_w((v4i32)_r5, (v4i32)_r4); v4i32 _r67r = __msa_ilvr_w((v4i32)_r7, (v4i32)_r6); v4i32 _r67l = __msa_ilvl_w((v4i32)_r7, (v4i32)_r6); v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r); v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r); v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l); v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l); v2i64 _r4567_0 = __msa_ilvr_d((v2i64)_r67r, (v2i64)_r45r); v2i64 _r4567_1 = __msa_ilvl_d((v2i64)_r67r, (v2i64)_r45r); v2i64 _r4567_2 = __msa_ilvr_d((v2i64)_r67l, (v2i64)_r45l); v2i64 _r4567_3 = __msa_ilvl_d((v2i64)_r67l, (v2i64)_r45l); __msa_st_w((v4i32)_r0123_0, tmpptr, 0); __msa_st_w((v4i32)_r4567_0, tmpptr + 4, 0); __msa_st_w((v4i32)_r0123_1, tmpptr + 4 * 2, 0); __msa_st_w((v4i32)_r4567_1, tmpptr + 4 * 3, 0); __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 4, 0); __msa_st_w((v4i32)_r4567_2, tmpptr + 4 * 5, 0); __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 6, 0); __msa_st_w((v4i32)_r4567_3, tmpptr + 4 * 7, 0); r0 += bottom_blob_tm.cstep * 4; tmpptr += 32; } } for (; i + 3 < tiles; i += 4) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x4 v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0); v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0); v4f32 _r2 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0); v4f32 _r3 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0); v4i32 _r01r = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0); v4i32 _r01l = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0); v4i32 _r23r = __msa_ilvr_w((v4i32)_r3, (v4i32)_r2); v4i32 _r23l = 
__msa_ilvl_w((v4i32)_r3, (v4i32)_r2); v2i64 _r0123_0 = __msa_ilvr_d((v2i64)_r23r, (v2i64)_r01r); v2i64 _r0123_1 = __msa_ilvl_d((v2i64)_r23r, (v2i64)_r01r); v2i64 _r0123_2 = __msa_ilvr_d((v2i64)_r23l, (v2i64)_r01l); v2i64 _r0123_3 = __msa_ilvl_d((v2i64)_r23l, (v2i64)_r01l); __msa_st_w((v4i32)_r0123_0, tmpptr, 0); __msa_st_w((v4i32)_r0123_1, tmpptr + 4, 0); __msa_st_w((v4i32)_r0123_2, tmpptr + 4 * 2, 0); __msa_st_w((v4i32)_r0123_3, tmpptr + 4 * 3, 0); r0 += bottom_blob_tm.cstep * 4; tmpptr += 16; } } for (; i + 1 < tiles; i += 2) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { // transpose 4x2 v4f32 _r0 = (v4f32)__msa_ld_w(r0, 0); v4f32 _r1 = (v4f32)__msa_ld_w(r0 + 4, 0); v4i32 _r01_0 = __msa_ilvr_w((v4i32)_r1, (v4i32)_r0); v4i32 _r01_1 = __msa_ilvl_w((v4i32)_r1, (v4i32)_r0); __msa_st_w((v4i32)_r01_0, tmpptr, 0); __msa_st_w((v4i32)_r01_1, tmpptr + 4, 0); r0 += bottom_blob_tm.cstep * 4; tmpptr += 8; } } for (; i < tiles; i++) { float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 4; for (int q = 0; q < inch; q++) { v4f32 _val = (v4f32)__msa_ld_w(r0, 0); __msa_st_w((v4i32)_val, tmpptr, 0); r0 += bottom_blob_tm.cstep * 4; tmpptr += 4; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 36, outch, 4u * elempack, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; for (; i + 11 < tiles; i += 12) { const float* r0 = bb2.row(i / 12); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 v4f32 _sum0 = (v4f32)__msa_fill_w(0); v4f32 _sum1 = 
(v4f32)__msa_fill_w(0); v4f32 _sum2 = (v4f32)__msa_fill_w(0); v4f32 _sum3 = (v4f32)__msa_fill_w(0); v4f32 _sum4 = (v4f32)__msa_fill_w(0); v4f32 _sum5 = (v4f32)__msa_fill_w(0); v4f32 _sum6 = (v4f32)__msa_fill_w(0); v4f32 _sum7 = (v4f32)__msa_fill_w(0); v4f32 _sum8 = (v4f32)__msa_fill_w(0); v4f32 _sum9 = (v4f32)__msa_fill_w(0); v4f32 _suma = (v4f32)__msa_fill_w(0); v4f32 _sumb = (v4f32)__msa_fill_w(0); for (int j = 0; j < nn; j++) { __builtin_prefetch(r0 + 96); __builtin_prefetch(k0 + 32); v4i32 _val0123 = __msa_ld_w(r0, 0); v4i32 _val4567 = __msa_ld_w(r0 + 4, 0); v4i32 _val89ab = __msa_ld_w(r0 + 8, 0); v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0); _sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0); _sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0); _sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0); _sum4 = __msa_fmadd_w(_sum4, (v4f32)__msa_splati_w(_val4567, 0), _w0); _sum5 = __msa_fmadd_w(_sum5, (v4f32)__msa_splati_w(_val4567, 1), _w0); _sum6 = __msa_fmadd_w(_sum6, (v4f32)__msa_splati_w(_val4567, 2), _w0); _sum7 = __msa_fmadd_w(_sum7, (v4f32)__msa_splati_w(_val4567, 3), _w0); _sum8 = __msa_fmadd_w(_sum8, (v4f32)__msa_splati_w(_val89ab, 0), _w0); _sum9 = __msa_fmadd_w(_sum9, (v4f32)__msa_splati_w(_val89ab, 1), _w0); _suma = __msa_fmadd_w(_suma, (v4f32)__msa_splati_w(_val89ab, 2), _w0); _sumb = __msa_fmadd_w(_sumb, (v4f32)__msa_splati_w(_val89ab, 3), _w0); r0 += 12; k0 += 4; } __msa_st_w((v4i32)_sum0, output0_tm, 0); __msa_st_w((v4i32)_sum1, output0_tm + 4, 0); __msa_st_w((v4i32)_sum2, output0_tm + 4 * 2, 0); __msa_st_w((v4i32)_sum3, output0_tm + 4 * 3, 0); __msa_st_w((v4i32)_sum4, output0_tm + 4 * 4, 0); __msa_st_w((v4i32)_sum5, output0_tm + 4 * 5, 0); __msa_st_w((v4i32)_sum6, output0_tm + 4 * 6, 0); __msa_st_w((v4i32)_sum7, output0_tm + 4 * 7, 0); __msa_st_w((v4i32)_sum8, output0_tm + 4 * 8, 0); __msa_st_w((v4i32)_sum9, output0_tm + 4 * 9, 0); 
__msa_st_w((v4i32)_suma, output0_tm + 4 * 10, 0); __msa_st_w((v4i32)_sumb, output0_tm + 4 * 11, 0); output0_tm += 4 * 12; } for (; i + 7 < tiles; i += 8) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 v4f32 _sum0 = (v4f32)__msa_fill_w(0); v4f32 _sum1 = (v4f32)__msa_fill_w(0); v4f32 _sum2 = (v4f32)__msa_fill_w(0); v4f32 _sum3 = (v4f32)__msa_fill_w(0); v4f32 _sum4 = (v4f32)__msa_fill_w(0); v4f32 _sum5 = (v4f32)__msa_fill_w(0); v4f32 _sum6 = (v4f32)__msa_fill_w(0); v4f32 _sum7 = (v4f32)__msa_fill_w(0); for (int j = 0; j < nn; j++) { __builtin_prefetch(r0 + 64); __builtin_prefetch(k0 + 32); v4i32 _val0123 = __msa_ld_w(r0, 0); v4i32 _val4567 = __msa_ld_w(r0 + 4, 0); v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0); _sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0); _sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0); _sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0); _sum4 = __msa_fmadd_w(_sum4, (v4f32)__msa_splati_w(_val4567, 0), _w0); _sum5 = __msa_fmadd_w(_sum5, (v4f32)__msa_splati_w(_val4567, 1), _w0); _sum6 = __msa_fmadd_w(_sum6, (v4f32)__msa_splati_w(_val4567, 2), _w0); _sum7 = __msa_fmadd_w(_sum7, (v4f32)__msa_splati_w(_val4567, 3), _w0); r0 += 8; k0 += 4; } __msa_st_w((v4i32)_sum0, output0_tm, 0); __msa_st_w((v4i32)_sum1, output0_tm + 4, 0); __msa_st_w((v4i32)_sum2, output0_tm + 4 * 2, 0); __msa_st_w((v4i32)_sum3, output0_tm + 4 * 3, 0); __msa_st_w((v4i32)_sum4, output0_tm + 4 * 4, 0); __msa_st_w((v4i32)_sum5, output0_tm + 4 * 5, 0); __msa_st_w((v4i32)_sum6, output0_tm + 4 * 6, 0); __msa_st_w((v4i32)_sum7, output0_tm + 4 * 7, 0); output0_tm += 4 * 8; } for (; i + 3 < tiles; i += 4) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 v4f32 _sum0 = (v4f32)__msa_fill_w(0); v4f32 
_sum1 = (v4f32)__msa_fill_w(0); v4f32 _sum2 = (v4f32)__msa_fill_w(0); v4f32 _sum3 = (v4f32)__msa_fill_w(0); for (int j = 0; j < nn; j++) { __builtin_prefetch(r0 + 32); __builtin_prefetch(k0 + 32); v4i32 _val0123 = __msa_ld_w(r0, 0); v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0); _sum0 = __msa_fmadd_w(_sum0, (v4f32)__msa_splati_w(_val0123, 0), _w0); _sum1 = __msa_fmadd_w(_sum1, (v4f32)__msa_splati_w(_val0123, 1), _w0); _sum2 = __msa_fmadd_w(_sum2, (v4f32)__msa_splati_w(_val0123, 2), _w0); _sum3 = __msa_fmadd_w(_sum3, (v4f32)__msa_splati_w(_val0123, 3), _w0); r0 += 4; k0 += 4; } __msa_st_w((v4i32)_sum0, output0_tm, 0); __msa_st_w((v4i32)_sum1, output0_tm + 4, 0); __msa_st_w((v4i32)_sum2, output0_tm + 4 * 2, 0); __msa_st_w((v4i32)_sum3, output0_tm + 4 * 3, 0); output0_tm += 4 * 4; } for (; i + 1 < tiles; i += 2) { const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* k0 = kernel0_tm.row(r); int nn = inch * 4; // inch always > 0 v4f32 _sum0 = (v4f32)__msa_fill_w(0); v4f32 _sum1 = (v4f32)__msa_fill_w(0); for (int j = 0; j < nn; j++) { __builtin_prefetch(r0 + 16); __builtin_prefetch(k0 + 32); v4f32 _val0 = __msa_fill_w_f32(*r0++); v4f32 _val1 = __msa_fill_w_f32(*r0++); v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0); _sum0 = __msa_fmadd_w(_sum0, _val0, _w0); _sum1 = __msa_fmadd_w(_sum1, _val1, _w0); k0 += 4; } __msa_st_w((v4i32)_sum0, output0_tm, 0); __msa_st_w((v4i32)_sum1, output0_tm + 4, 0); output0_tm += 4 * 2; } for (; i < tiles; i++) { const float* r0 = bb2.row<const float>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* k0 = kernel0_tm.row<const float>(r); int nn = inch * 4; // inch always > 0 v4f32 _sum = (v4f32)__msa_fill_w(0); for (int j = 0; j < nn; j++) { __builtin_prefetch(r0 + 8); __builtin_prefetch(k0 + 32); v4f32 _val0 = __msa_fill_w_f32(*r0++); v4f32 _w0 = (v4f32)__msa_ld_w(k0, 0); _sum = __msa_fmadd_w(_sum, _val0, _w0); k0 += 4; } __msa_st_w((v4i32)_sum, output0_tm, 0); output0_tm += 4; 
} } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; if (outw == top_blob.w && outh == top_blob.h) { top_blob_bordered = top_blob; } else { top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator); } { // const float otm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + (r01 + r02) + (r03 + r04) // 1 = (r01 - r02) + (r03 - r04) * 2 // 2 = (r01 + r02) + (r03 + r04) * 4 // 3 = r05 + (r01 - r02) + (r03 - r04) * 8 int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = w_tm / 6 * h_tm / 6; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob_bordered.channel(p); // const float bias0 = bias ? bias[p] : 0.f; v4f32 _bias0 = bias ? (v4f32)__msa_ld_w((const float*)bias + p * 4, 0) : (v4f32)__msa_fill_w(0); float tmp[4][6][4]; v4f32 _v2 = __msa_fill_w_f32(2.f); v4f32 _v4 = __msa_fill_w_f32(4.f); v4f32 _v8 = __msa_fill_w_f32(8.f); // tile for (int i = 0; i < outh / 4; i++) { for (int j = 0; j < outw / 4; j++) { // top_blob_tm.create(tiles, 36, outch, elemsize, elempack); const float* output0_tm_0 = (const float*)out0_tm + (i * w_tm / 6 + j) * 4; const float* output0_tm_1 = output0_tm_0 + tiles * 4; const float* output0_tm_2 = output0_tm_0 + tiles * 4 * 2; const float* output0_tm_3 = output0_tm_0 + tiles * 4 * 3; const float* output0_tm_4 = output0_tm_0 + tiles * 4 * 4; const float* output0_tm_5 = output0_tm_0 + tiles * 4 * 5; float* output0 = out0.row<float>(i * 4) + (j * 4) * 4; // TODO msa optimize for (int m = 0; m < 6; m++) { v4f32 _out0tm0 = (v4f32)__msa_ld_w(output0_tm_0, 0); v4f32 _out0tm1 = (v4f32)__msa_ld_w(output0_tm_1, 0); v4f32 _out0tm2 = (v4f32)__msa_ld_w(output0_tm_2, 0); v4f32 _out0tm3 = (v4f32)__msa_ld_w(output0_tm_3, 0); v4f32 _out0tm4 = 
(v4f32)__msa_ld_w(output0_tm_4, 0); v4f32 _out0tm5 = (v4f32)__msa_ld_w(output0_tm_5, 0); v4f32 _tmp02a = __msa_fadd_w(_out0tm1, _out0tm2); v4f32 _tmp13a = __msa_fsub_w(_out0tm1, _out0tm2); v4f32 _tmp02b = __msa_fadd_w(_out0tm3, _out0tm4); v4f32 _tmp13b = __msa_fsub_w(_out0tm3, _out0tm4); v4f32 _tmp0m = __msa_fadd_w(__msa_fadd_w(_out0tm0, _tmp02a), _tmp02b); v4f32 _tmp1m = __msa_fmadd_w(_tmp13a, _v2, _tmp13b); v4f32 _tmp2m = __msa_fmadd_w(_tmp02a, _v4, _tmp02b); v4f32 _tmp3m = __msa_fmadd_w(__msa_fadd_w(_out0tm5, _tmp13a), _v8, _tmp13b); __msa_st_w((v4i32)_tmp0m, tmp[0][m], 0); __msa_st_w((v4i32)_tmp1m, tmp[1][m], 0); __msa_st_w((v4i32)_tmp2m, tmp[2][m], 0); __msa_st_w((v4i32)_tmp3m, tmp[3][m], 0); output0_tm_0 += tiles * 4 * 6; output0_tm_1 += tiles * 4 * 6; output0_tm_2 += tiles * 4 * 6; output0_tm_3 += tiles * 4 * 6; output0_tm_4 += tiles * 4 * 6; output0_tm_5 += tiles * 4 * 6; } for (int m = 0; m < 4; m++) { v4f32 _tmp00 = (v4f32)__msa_ld_w(tmp[m][0], 0); v4f32 _tmp01 = (v4f32)__msa_ld_w(tmp[m][1], 0); v4f32 _tmp02 = (v4f32)__msa_ld_w(tmp[m][2], 0); v4f32 _tmp03 = (v4f32)__msa_ld_w(tmp[m][3], 0); v4f32 _tmp04 = (v4f32)__msa_ld_w(tmp[m][4], 0); v4f32 _tmp05 = (v4f32)__msa_ld_w(tmp[m][5], 0); v4f32 _tmp02a = __msa_fadd_w(_tmp01, _tmp02); v4f32 _tmp13a = __msa_fsub_w(_tmp01, _tmp02); v4f32 _tmp02b = __msa_fadd_w(_tmp03, _tmp04); v4f32 _tmp13b = __msa_fsub_w(_tmp03, _tmp04); v4f32 _out00 = __msa_fadd_w(_bias0, __msa_fadd_w(__msa_fadd_w(_tmp00, _tmp02a), _tmp02b)); v4f32 _out01 = __msa_fadd_w(_bias0, __msa_fmadd_w(_tmp13a, _v2, _tmp13b)); v4f32 _out02 = __msa_fadd_w(_bias0, __msa_fmadd_w(_tmp02a, _v4, _tmp02b)); v4f32 _out03 = __msa_fadd_w(_bias0, __msa_fmadd_w(__msa_fadd_w(_tmp05, _tmp13a), _v8, _tmp13b)); __msa_st_w((v4i32)_out00, output0, 0); __msa_st_w((v4i32)_out01, output0 + 4, 0); __msa_st_w((v4i32)_out02, output0 + 4 * 2, 0); __msa_st_w((v4i32)_out03, output0 + 4 * 3, 0); output0 += outw * 4; } } } } } // END transform output // cut result pad 
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt); }
bicgstab.c
#include<stdio.h> #include<stdlib.h> #include<omp.h> #include "include/SpUtil.h" #include "include/myblas.h" #include "include/precond.h" struct CRS_matrix{ int N; int nnz; int* row_ptr; int* col_ind; double* val; }; int main(int argc, char **argv){ // e.g. ./a.out data/test.mtx if(argc!=2){ printf("error! $1 is matrix market file name\n"); abort(); } //////////////////// // Create CRS format matrix from Matrixmarket format file // See https://zenn.dev/hishinuma_t/books/sparse-matrix-and-vector-product /////////////////// int N, nnz; //file open and read header to get matrix size FILE* fp = fopen(argv[1], "r"); SpUtil_read_mm_header(fp, &N, &nnz); //printf("N = %d, nnz = %d\n", N, nnz); // allocate COO array int* coo_row_index = (int*)malloc(sizeof(int)*nnz); int* coo_col_index = (int*)malloc(sizeof(int)*nnz); double* coo_val = (double*)malloc(sizeof(double)*nnz); // create COO from file SpUtil_mm2coo(fp, N, nnz, coo_row_index, coo_col_index, coo_val); // close fclose(fp); // allocate CRS array struct CRS_matrix A; A.row_ptr = (int*)malloc(sizeof(int)*(N+1)); A.col_ind = (int*)malloc(sizeof(int)*nnz); A.val = (double*)malloc(sizeof(double)*nnz); A.N = N; A.nnz = nnz; // crate CRS from COO SpUtil_coo2crs(N,nnz, coo_row_index, coo_col_index, coo_val, A.row_ptr, A.col_ind, A.val); // debug // SpUtil_print_crs(A.N, A.nnz, A.row_ptr, A.col_ind, A.val); free(coo_row_index); free(coo_col_index); free(coo_val); //////////////////// // Initialize BiCGSTAB vectors // BiCGSTAB /////////////////// //change me size_t maxiter = A.N*100; double tol = 1.0e-12; double* b = (double*)malloc(sizeof(double)*(A.N)); double* x = (double*)malloc(sizeof(double)*(A.N)); //change me #pragma omp parallel for for(size_t i = 0; i < A.N; i++){ x[i] = 0.0; //initial x b[i] = 1.0; //initial b } double* r = (double*)malloc(sizeof(double)*(A.N)); double* r0 = (double*)malloc(sizeof(double)*(A.N)); double* p = (double*)malloc(sizeof(double)*(A.N)); double* phat = 
(double*)malloc(sizeof(double)*(A.N)); double* s = (double*)malloc(sizeof(double)*(A.N)); double* shat = (double*)malloc(sizeof(double)*(A.N)); double* v = (double*)malloc(sizeof(double)*(A.N)); double* t = (double*)malloc(sizeof(double)*(A.N)); double* M = (double*)malloc(sizeof(double)*(A.N)); //for precond //////////////////// // BiCGSTAB /////////////////// double rho_old = 1.0, rho = 1.0, alpha = 1.0, beta, omega = 1.0; create_precond_jacobi(A.N, A.row_ptr, A.col_ind, A.val, M); // r = b-Ax matvec(A.N, A.row_ptr, A.col_ind, A.val, x, r); #pragma omp parallel for for(size_t i = 0; i < A.N; i++){ r[i] = b[i] - r[i]; } // r0 = r, (r*0, r0)!=0 copy(A.N, r, r0); for (size_t iter = 0; iter < maxiter; iter++) { // alpha = (r(i-1), r0) / (AM^-1*p(i-1), r0) rho = dot(A.N, r, r0); if (rho == 0.0) { printf("%ld: %e\n", iter+1, rho); printf("success\n"); free(A.row_ptr); free(A.col_ind); free(A.val); free(M); free(r); free(r0); free(v); free(t); free(p); free(phat); free(s); free(shat); free(x); free(b); return 0; } if (iter == 0) { copy(A.N, r, p); } else { // beta = (rho / rho_old) * (alpha / omega) beta = (rho / rho_old) * (alpha / omega); // p = r + beta(p + omega * AM-1 p(i-1) ) axpy(A.N, -omega, v, p); // p = -omega*v + p xpay(A.N, beta, r, p); // p = r + beta*p } // phat = M^-1 p(i-1) apply_precond_jacobi(A.N, M, p, phat); // v = AM^-1p(i-1) matvec(A.N, A.row_ptr, A.col_ind, A.val, phat, v); alpha = rho / dot(A.N, v, r0); // s(i) = r(i-1) - alpha v axpyz(A.N, -alpha, v, r, s); // shat = M^-1 s(i) apply_precond_jacobi(A.N, M, s, shat); // t = A * shat matvec(A.N, A.row_ptr, A.col_ind, A.val, shat, t); // omega = (AM-1s, s) / (AM-1s, AM-1s) omega = dot(A.N, t, s) / dot(A.N, t, t); if (omega == 0.0) { printf("fail (div zero breakdown)\n"); free(A.row_ptr); free(A.col_ind); free(A.val); free(M); free(r); free(r0); free(v); free(t); free(p); free(phat); free(s); free(shat); free(x); free(b); return -1; } // x(i) = x(i-1) + alpha * M^-1 p(i-1) + omega * M^-1 s(i) 
axpy(A.N, alpha, phat, x); axpy(A.N, omega, shat, x); // r(i) = s(i-1) - omega * AM^-1 s(i-1) axpyz(A.N, -omega, t, s, r); // convergence check double resid = nrm2(A.N, r); printf("%ld: %e\n", iter+1, resid); if (resid < tol) { printf("success\n"); free(A.row_ptr); free(A.col_ind); free(A.val); free(M); free(r); free(r0); free(v); free(t); free(p); free(phat); free(s); free(shat); free(x); free(b); return 0; } rho_old = rho; } printf("fail (maxiter)\n"); free(A.row_ptr); free(A.col_ind); free(A.val); free(M); free(r); free(r0); free(v); free(t); free(p); free(phat); free(s); free(shat); free(x); free(b); return -2; }
omp_prod_vet.c
#include <stdio.h>
#include <omp.h>

#define tamanho 100

/*
 * Computes the dot product of two float vectors of length `tamanho`
 * in parallel with OpenMP and prints the result.
 *
 * a[i] = i and b[i] = 2*i, so the expected value is
 * 2 * sum_{i=0}^{99} i^2 = 656700.
 */
int main()
{
    int i, chunk;
    float a[tamanho], b[tamanho], result;

    /* Some initializations */
    chunk = 10;    /* chunk size for the static schedule */
    result = 0.0;
    for (i = 0; i < tamanho; i++) {
        a[i] = i * 1.0;
        b[i] = i * 2.0;
    }

    /* Each thread accumulates a private partial sum; the reduction(+)
       clause combines the partial sums into `result` after the loop. */
    #pragma omp parallel for \
        default(shared) private(i) \
        schedule(static, chunk) \
        reduction(+:result)
    for (i = 0; i < tamanho; i++)
        result = result + (a[i] * b[i]);

    printf("Final result= %f\n", result);
    return 0;  /* fix: main previously fell off the end without a return */
}
idaFoodWeb_bnd_omp.c
/* * ----------------------------------------------------------------- * Programmer(s): Daniel R. Reynolds and Ting Yan @ SMU * Based on idaFoodWeb_bnd.c and parallelized with OpenMP * ----------------------------------------------------------------- * SUNDIALS Copyright Start * Copyright (c) 2002-2020, Lawrence Livermore National Security * and Southern Methodist University. * All rights reserved. * * See the top-level LICENSE and NOTICE files for details. * * SPDX-License-Identifier: BSD-3-Clause * SUNDIALS Copyright End * ----------------------------------------------------------------- * Example program for IDA: Food web problem. * * This example program (OpenMP version) uses the SUNBAND linear * solver, and IDACalcIC for initial condition calculation. * * The mathematical problem solved in this example is a DAE system * that arises from a system of partial differential equations after * spatial discretization. The PDE system is a food web population * model, with predator-prey interaction and diffusion on the unit * square in two dimensions. The dependent variable vector is: * * 1 2 ns * c = (c , c , ..., c ) , ns = 2 * np * * and the PDE's are as follows: * * i i i * dc /dt = d(i)*(c + c ) + R (x,y,c) (i = 1,...,np) * xx yy i * * i i * 0 = d(i)*(c + c ) + R (x,y,c) (i = np+1,...,ns) * xx yy i * * where the reaction terms R are: * * i ns j * R (x,y,c) = c * (b(i) + sum a(i,j)*c ) * i j=1 * * The number of species is ns = 2 * np, with the first np being * prey and the last np being predators. The coefficients a(i,j), * b(i), d(i) are: * * a(i,i) = -AA (all i) * a(i,j) = -GG (i <= np , j > np) * a(i,j) = EE (i > np, j <= np) * all other a(i,j) = 0 * b(i) = BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i <= np) * b(i) =-BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i > np) * d(i) = DPREY (i <= np) * d(i) = DPRED (i > np) * * The various scalar parameters required are set using '#define' * statements or directly in routine InitUserData. 
In this program, * np = 1, ns = 2. The boundary conditions are homogeneous Neumann: * normal derivative = 0. * * A polynomial in x and y is used to set the initial values of the * first np variables (the prey variables) at each x,y location, * while initial values for the remaining (predator) variables are * set to a flat value, which is corrected by IDACalcIC. * * The PDEs are discretized by central differencing on a MX by MY * mesh. * * The DAE system is solved by IDA using the SUNBAND linear solver. * Output is printed at t = 0, .001, .01, .1, .4, .7, 1. * * Optionally, we can set the number of threads from environment * variable or command line. To check the current value for number * of threads from environment: * % echo $OMP_NUM_THREADS * * Execution: * * To use the default value for the number of threads from * the OMP_NUM_THREADS environment value: * % ./idaFoodWeb_bnd_omp * To specify the number of threads at the command line, use * % ./idaFoodWeb_bnd_omp num_threads * where num_threads is the desired number of threads. * * ----------------------------------------------------------------- * References: * [1] Peter N. Brown and Alan C. Hindmarsh, * Reduced Storage Matrix Methods in Stiff ODE systems, Journal * of Applied Mathematics and Computation, Vol. 31 (May 1989), * pp. 40-91. * * [2] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold, * Using Krylov Methods in the Solution of Large-Scale * Differential-Algebraic Systems, SIAM J. Sci. Comput., 15 * (1994), pp. 1467-1488. * * [3] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold, * Consistent Initial Condition Calculation for Differential- * Algebraic Systems, SIAM J. Sci. Comput., 19 (1998), * pp. 1495-1512. 
* ----------------------------------------------------------------- */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <ida/ida.h> #include <sunmatrix/sunmatrix_band.h> #include <sunlinsol/sunlinsol_band.h> #include <nvector/nvector_openmp.h> #include <sundials/sundials_direct.h> #include <sundials/sundials_types.h> #ifdef _OPENMP #include <omp.h> #endif /* Problem Constants. */ #define NPREY 1 /* No. of prey (= no. of predators). */ #define NUM_SPECIES 2*NPREY #define PI RCONST(3.1415926535898) #define FOURPI (RCONST(4.0)*PI) #define MX 20 /* MX = number of x mesh points */ #define MY 20 /* MY = number of y mesh points */ #define NSMX (NUM_SPECIES * MX) #define NEQ (NUM_SPECIES*MX*MY) #define AA RCONST(1.0) /* Coefficient in above eqns. for a */ #define EE RCONST(10000.) /* Coefficient in above eqns. for a */ #define GG RCONST(0.5e-6) /* Coefficient in above eqns. for a */ #define BB RCONST(1.0) /* Coefficient in above eqns. for b */ #define DPREY RCONST(1.0) /* Coefficient in above eqns. for d */ #define DPRED RCONST(0.05) /* Coefficient in above eqns. for d */ #define ALPHA RCONST(50.) /* Coefficient alpha in above eqns. */ #define BETA RCONST(1000.) /* Coefficient beta in above eqns. */ #define AX RCONST(1.0) /* Total range of x variable */ #define AY RCONST(1.0) /* Total range of y variable */ #define RTOL RCONST(1.e-5) /* Relative tolerance */ #define ATOL RCONST(1.e-5) /* Absolute tolerance */ #define NOUT 6 /* Number of output times */ #define TMULT RCONST(10.0) /* Multiplier for tout values */ #define TADD RCONST(0.3) /* Increment for tout values */ #define ZERO RCONST(0.) #define ONE RCONST(1.0) /* * User-defined vector and accessor macro: IJ_Vptr. * IJ_Vptr is defined in order to express the underlying 3-D structure of * the dependent variable vector from its underlying 1-D storage (an N_Vector). * IJ_Vptr(vv,i,j) returns a pointer to the location in vv corresponding to * species index is = 0, x-index ix = i, and y-index jy = j. 
*/ #define IJ_Vptr(vv,i,j) (&NV_Ith_OMP(vv, (i)*NUM_SPECIES + (j)*NSMX)) /* Type: UserData. Contains problem constants, etc. */ typedef struct { sunindextype Neq, ns, np, mx, my; realtype dx, dy, **acoef; realtype cox[NUM_SPECIES], coy[NUM_SPECIES], bcoef[NUM_SPECIES]; N_Vector rates; int nthreads; } *UserData; /* Prototypes for functions called by the IDA Solver. */ static int resweb(realtype time, N_Vector cc, N_Vector cp, N_Vector resval, void *user_data); /* Prototypes for private Helper Functions. */ static void InitUserData(UserData webdata); static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id, UserData webdata); static void PrintHeader(sunindextype mu, sunindextype ml, realtype rtol, realtype atol); static void PrintOutput(void *ida_mem, N_Vector c, realtype t); static void PrintFinalStats(void *ida_mem); static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate, UserData webdata); static void WebRates(realtype xx, realtype yy, realtype *cxy, realtype *ratesxy, UserData webdata); static realtype dotprod(sunindextype size, realtype *x1, realtype *x2); static int check_retval(void *returnvalue, char *funcname, int opt); /* *-------------------------------------------------------------------- * MAIN PROGRAM *-------------------------------------------------------------------- */ int main(int argc, char *argv[]) { void *ida_mem; SUNMatrix A; SUNLinearSolver LS; UserData webdata; N_Vector cc, cp, id; int iout, retval; sunindextype mu, ml; realtype rtol, atol, t0, tout, tret; int num_threads; ida_mem = NULL; A = NULL; LS = NULL; webdata = NULL; cc = cp = id = NULL; /* Set the number of threads to use */ num_threads = 1; /* default value */ #ifdef _OPENMP num_threads = omp_get_max_threads(); /* overwrite with OMP_NUM_THREADS enviroment variable */ #endif if (argc > 1) /* overwrite with command line value, if supplied */ num_threads = (int) strtol(argv[1], NULL, 0); /* Allocate and initialize user data block webdata. 
*/ webdata = (UserData) malloc(sizeof *webdata); webdata->rates = N_VNew_OpenMP(NEQ, num_threads); webdata->acoef = newDenseMat(NUM_SPECIES, NUM_SPECIES); webdata->nthreads = num_threads; InitUserData(webdata); /* Allocate N-vectors and initialize cc, cp, and id. */ cc = N_VNew_OpenMP(NEQ, num_threads); if(check_retval((void *)cc, "N_VNew_OpenMP", 0)) return(1); cp = N_VNew_OpenMP(NEQ, num_threads); if(check_retval((void *)cp, "N_VNew_OpenMP", 0)) return(1); id = N_VNew_OpenMP(NEQ, num_threads); if(check_retval((void *)id, "N_VNew_OpenMP", 0)) return(1); SetInitialProfiles(cc, cp, id, webdata); /* Set remaining inputs to IDAMalloc. */ t0 = ZERO; rtol = RTOL; atol = ATOL; /* Call IDACreate and IDAMalloc to initialize IDA. */ ida_mem = IDACreate(); if(check_retval((void *) ida_mem, "IDACreate", 0)) return(1); retval = IDASetUserData(ida_mem, webdata); if(check_retval(&retval, "IDASetUserData", 1)) return(1); retval = IDASetId(ida_mem, id); if(check_retval(&retval, "IDASetId", 1)) return(1); retval = IDAInit(ida_mem, resweb, t0, cc, cp); if(check_retval(&retval, "IDAInit", 1)) return(1); retval = IDASStolerances(ida_mem, rtol, atol); if(check_retval(&retval, "IDASStolerances", 1)) return(1); /* Setup band matrix and linear solver, and attach to IDA. */ mu = ml = NSMX; A = SUNBandMatrix(NEQ, mu, ml); if(check_retval((void *)A, "SUNBandMatrix", 0)) return(1); LS = SUNLinSol_Band(cc, A); if(check_retval((void *)LS, "SUNLinSol_Band", 0)) return(1); retval = IDASetLinearSolver(ida_mem, LS, A); if(check_retval(&retval, "IDASetLinearSolver", 1)) return(1); /* Call IDACalcIC (with default options) to correct the initial values. */ tout = RCONST(0.001); retval = IDACalcIC(ida_mem, IDA_YA_YDP_INIT, tout); if(check_retval(&retval, "IDACalcIC", 1)) return(1); /* Print heading, basic parameters, and initial values. */ PrintHeader(mu, ml, rtol, atol); PrintOutput(ida_mem, cc, ZERO); /* Loop over iout, call IDASolve (normal mode), print selected output. 
*/ for (iout = 1; iout <= NOUT; iout++) { retval = IDASolve(ida_mem, tout, &tret, cc, cp, IDA_NORMAL); if(check_retval(&retval, "IDASolve", 1)) return(retval); PrintOutput(ida_mem, cc, tret); if (iout < 3) tout *= TMULT; else tout += TADD; } /* Print final statistics and free memory. */ PrintFinalStats(ida_mem); printf("num_threads = %i\n\n", num_threads); /* Free memory */ IDAFree(&ida_mem); SUNLinSolFree(LS); SUNMatDestroy(A); N_VDestroy_OpenMP(cc); N_VDestroy_OpenMP(cp); N_VDestroy_OpenMP(id); destroyMat(webdata->acoef); N_VDestroy_OpenMP(webdata->rates); free(webdata); return(0); } /* Define lines for readability in later routines */ #define acoef (webdata->acoef) #define bcoef (webdata->bcoef) #define cox (webdata->cox) #define coy (webdata->coy) /* *-------------------------------------------------------------------- * FUNCTIONS CALLED BY IDA *-------------------------------------------------------------------- */ /* * resweb: System residual function for predator-prey system. * This routine calls Fweb to get all the right-hand sides of the * equations, then loads the residual vector accordingly, * using cp in the case of prey species. */ static int resweb(realtype tt, N_Vector cc, N_Vector cp, N_Vector res, void *user_data) { sunindextype jx, jy, is, yloc, loc, np; realtype *resv, *cpv; UserData webdata; jx = jy = is = 0; webdata = (UserData)user_data; cpv = NV_DATA_OMP(cp); resv = NV_DATA_OMP(res); np = webdata->np; /* Call Fweb to set res to vector of right-hand sides. */ Fweb(tt, cc, res, webdata); /* Loop over all grid points, setting residual values appropriately for differential or algebraic components. 
*/ #pragma omp parallel for default(shared) private(jy, yloc, jx, loc, is) schedule(static) num_threads(webdata->nthreads) for (jy = 0; jy < MY; jy++) { yloc = NSMX * jy; for (jx = 0; jx < MX; jx++) { loc = yloc + NUM_SPECIES * jx; for (is = 0; is < NUM_SPECIES; is++) { if (is < np) resv[loc+is] = cpv[loc+is] - resv[loc+is]; else resv[loc+is] = -resv[loc+is]; } } } return(0); } /* *-------------------------------------------------------------------- * PRIVATE FUNCTIONS *-------------------------------------------------------------------- */ /* * InitUserData: Load problem constants in webdata (of type UserData). */ static void InitUserData(UserData webdata) { sunindextype i, j, np; realtype *a1,*a2, *a3, *a4, dx2, dy2; webdata->mx = MX; webdata->my = MY; webdata->ns = NUM_SPECIES; webdata->np = NPREY; webdata->dx = AX/(MX-1); webdata->dy = AY/(MY-1); webdata->Neq= NEQ; /* Set up the coefficients a and b, and others found in the equations. */ np = webdata->np; dx2 = (webdata->dx)*(webdata->dx); dy2 = (webdata->dy)*(webdata->dy); for (i = 0; i < np; i++) { a1 = &(acoef[i][np]); a2 = &(acoef[i+np][0]); a3 = &(acoef[i][0]); a4 = &(acoef[i+np][np]); /* Fill in the portion of acoef in the four quadrants, row by row. */ for (j = 0; j < np; j++) { *a1++ = -GG; *a2++ = EE; *a3++ = ZERO; *a4++ = ZERO; } /* Reset the diagonal elements of acoef to -AA. */ acoef[i][i] = -AA; acoef[i+np][i+np] = -AA; /* Set coefficients for b and diffusion terms. */ bcoef[i] = BB; bcoef[i+np] = -BB; cox[i] = DPREY/dx2; cox[i+np] = DPRED/dx2; coy[i] = DPREY/dy2; coy[i+np] = DPRED/dy2; } } /* * SetInitialProfiles: Set initial conditions in cc, cp, and id. * A polynomial profile is used for the prey cc values, and a constant * (1.0e5) is loaded as the initial guess for the predator cc values. * The id values are set to 1 for the prey and 0 for the predators. * The prey cp values are set according to the given system, and * the predator cp values are set to zero. 
*/ static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id, UserData webdata) { sunindextype loc, yloc, is, jx, jy, np; realtype xx, yy, xyfactor; realtype *ccv, *cpv, *idv; ccv = NV_DATA_OMP(cc); cpv = NV_DATA_OMP(cp); idv = NV_DATA_OMP(id); np = webdata->np; /* Loop over grid, load cc values and id values. */ for (jy = 0; jy < MY; jy++) { yy = jy * webdata->dy; yloc = NSMX * jy; for (jx = 0; jx < MX; jx++) { xx = jx * webdata->dx; xyfactor = RCONST(16.0)*xx*(ONE-xx)*yy*(ONE-yy); xyfactor *= xyfactor; loc = yloc + NUM_SPECIES*jx; for (is = 0; is < NUM_SPECIES; is++) { if (is < np) { ccv[loc+is] = RCONST(10.0) + (realtype)(is+1) * xyfactor; idv[loc+is] = ONE; } else { ccv[loc+is] = RCONST(1.0e5); idv[loc+is] = ZERO; } } } } /* Set c' for the prey by calling the function Fweb. */ Fweb(ZERO, cc, cp, webdata); /* Set c' for predators to 0. */ for (jy = 0; jy < MY; jy++) { yloc = NSMX * jy; for (jx = 0; jx < MX; jx++) { loc = yloc + NUM_SPECIES * jx; for (is = np; is < NUM_SPECIES; is++) { cpv[loc+is] = ZERO; } } } } /* * Print first lines of output (problem description) */ static void PrintHeader(sunindextype mu, sunindextype ml, realtype rtol, realtype atol) { printf("\nidaFoodWeb_bnd_omp: Predator-prey DAE OpenMP example problem for IDA \n\n"); printf("Number of species ns: %d", NUM_SPECIES); printf(" Mesh dimensions: %d x %d", MX, MY); printf(" System size: %d\n", NEQ); #if defined(SUNDIALS_EXTENDED_PRECISION) printf("Tolerance parameters: rtol = %Lg atol = %Lg\n", rtol, atol); #elif defined(SUNDIALS_DOUBLE_PRECISION) printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol); #else printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol); #endif printf("Linear solver: SUNBAND, Band parameters mu = %ld, ml = %ld\n", (long int) mu, (long int) ml); printf("CalcIC called to correct initial predator concentrations.\n\n"); printf("-----------------------------------------------------------\n"); printf(" t bottom-left top-right"); printf(" | 
nst k h\n"); printf("-----------------------------------------------------------\n\n"); } /* * PrintOutput: Print output values at output time t = tt. * Selected run statistics are printed. Then values of the concentrations * are printed for the bottom left and top right grid points only. */ static void PrintOutput(void *ida_mem, N_Vector c, realtype t) { int i, kused, retval; long int nst; realtype *c_bl, *c_tr, hused; retval = IDAGetLastOrder(ida_mem, &kused); check_retval(&retval, "IDAGetLastOrder", 1); retval = IDAGetNumSteps(ida_mem, &nst); check_retval(&retval, "IDAGetNumSteps", 1); retval = IDAGetLastStep(ida_mem, &hused); check_retval(&retval, "IDAGetLastStep", 1); c_bl = IJ_Vptr(c,0,0); c_tr = IJ_Vptr(c,MX-1,MY-1); #if defined(SUNDIALS_EXTENDED_PRECISION) printf("%8.2Le %12.4Le %12.4Le | %3ld %1d %12.4Le\n", t, c_bl[0], c_tr[0], nst, kused, hused); for (i=1;i<NUM_SPECIES;i++) printf(" %12.4Le %12.4Le |\n",c_bl[i],c_tr[i]); #elif defined(SUNDIALS_DOUBLE_PRECISION) printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n", t, c_bl[0], c_tr[0], nst, kused, hused); for (i=1;i<NUM_SPECIES;i++) printf(" %12.4e %12.4e |\n",c_bl[i],c_tr[i]); #else printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n", t, c_bl[0], c_tr[0], nst, kused, hused); for (i=1;i<NUM_SPECIES;i++) printf(" %12.4e %12.4e |\n",c_bl[i],c_tr[i]); #endif printf("\n"); } /* * PrintFinalStats: Print final run data contained in iopt. 
*/ static void PrintFinalStats(void *ida_mem) { long int nst, nre, nreLS, nni, nje, netf, ncfn; int retval; retval = IDAGetNumSteps(ida_mem, &nst); check_retval(&retval, "IDAGetNumSteps", 1); retval = IDAGetNumNonlinSolvIters(ida_mem, &nni); check_retval(&retval, "IDAGetNumNonlinSolvIters", 1); retval = IDAGetNumResEvals(ida_mem, &nre); check_retval(&retval, "IDAGetNumResEvals", 1); retval = IDAGetNumErrTestFails(ida_mem, &netf); check_retval(&retval, "IDAGetNumErrTestFails", 1); retval = IDAGetNumNonlinSolvConvFails(ida_mem, &ncfn); check_retval(&retval, "IDAGetNumNonlinSolvConvFails", 1); retval = IDAGetNumJacEvals(ida_mem, &nje); check_retval(&retval, "IDAGetNumJacEvals", 1); retval = IDAGetNumLinResEvals(ida_mem, &nreLS); check_retval(&retval, "IDAGetNumLinResEvals", 1); printf("-----------------------------------------------------------\n"); printf("Final run statistics: \n\n"); printf("Number of steps = %ld\n", nst); printf("Number of residual evaluations = %ld\n", nre+nreLS); printf("Number of Jacobian evaluations = %ld\n", nje); printf("Number of nonlinear iterations = %ld\n", nni); printf("Number of error test failures = %ld\n", netf); printf("Number of nonlinear conv. failures = %ld\n", ncfn); } /* * Fweb: Rate function for the food-web problem. * This routine computes the right-hand sides of the system equations, * consisting of the diffusion term and interaction term. * The interaction term is computed by the function WebRates. */ static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate, UserData webdata) { sunindextype jx, jy, is, idyu, idyl, idxu, idxl; realtype xx, yy, *cxy, *ratesxy, *cratexy, dcyli, dcyui, dcxli, dcxui; /* Loop over grid points, evaluate interaction vector (length ns), form diffusion difference terms, and load crate. */ jx = jy = is = 0; for (jy = 0; jy < MY; jy++) { yy = (webdata->dy) * jy ; idyu = (jy!=MY-1) ? NSMX : -NSMX; idyl = (jy!= 0 ) ? 
NSMX : -NSMX; for (jx = 0; jx < MX; jx++) { xx = (webdata->dx) * jx; idxu = (jx!= MX-1) ? NUM_SPECIES : -NUM_SPECIES; idxl = (jx!= 0 ) ? NUM_SPECIES : -NUM_SPECIES; cxy = IJ_Vptr(cc,jx,jy); ratesxy = IJ_Vptr(webdata->rates,jx,jy); cratexy = IJ_Vptr(crate,jx,jy); /* Get interaction vector at this grid point. */ WebRates(xx, yy, cxy, ratesxy, webdata); /* Loop over species, do differencing, load crate segment. */ #pragma omp parallel for default(shared) private(is, dcyli, dcyui, dcxli, dcxui) schedule(static) num_threads(webdata->nthreads) for (is = 0; is < NUM_SPECIES; is++) { /* Differencing in y. */ dcyli = *(cxy+is) - *(cxy - idyl + is) ; dcyui = *(cxy + idyu + is) - *(cxy+is); /* Differencing in x. */ dcxli = *(cxy+is) - *(cxy - idxl + is); dcxui = *(cxy + idxu +is) - *(cxy+is); /* Compute the crate values at (xx,yy). */ cratexy[is] = coy[is] * (dcyui - dcyli) + cox[is] * (dcxui - dcxli) + ratesxy[is]; } /* End is loop */ } /* End of jx loop */ } /* End of jy loop */ } /* * WebRates: Evaluate reaction rates at a given spatial point. * At a given (x,y), evaluate the array of ns reaction terms R. */ static void WebRates(realtype xx, realtype yy, realtype *cxy, realtype *ratesxy, UserData webdata) { int is; realtype fac; for (is = 0; is < NUM_SPECIES; is++) ratesxy[is] = dotprod(NUM_SPECIES, cxy, acoef[is]); fac = ONE + ALPHA*xx*yy + BETA*sin(FOURPI*xx)*sin(FOURPI*yy); for (is = 0; is < NUM_SPECIES; is++) ratesxy[is] = cxy[is]*( bcoef[is]*fac + ratesxy[is] ); } /* * dotprod: dot product routine for realtype arrays, for use by WebRates. */ static realtype dotprod(sunindextype size, realtype *x1, realtype *x2) { sunindextype i; realtype *xx1, *xx2, temp = ZERO; xx1 = x1; xx2 = x2; for (i = 0; i < size; i++) temp += (*xx1++) * (*xx2++); return(temp); } /* * Check function return value... 
/*
 * Check function return value...
 *   opt == 0 means SUNDIALS function allocates memory so check if
 *            returned NULL pointer
 *   opt == 1 means SUNDIALS function returns an integer value so check if
 *            retval < 0
 *   opt == 2 means function allocates memory so check if returned
 *            NULL pointer
 *
 * Returns 1 (and prints a diagnostic to stderr) on failure, 0 otherwise.
 */
static int check_retval(void *returnvalue, char *funcname, int opt)
{
  switch (opt) {

  case 0:  /* allocation by a SUNDIALS routine: NULL means no memory */
    if (returnvalue == NULL) {
      fprintf(stderr,
              "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
              funcname);
      return(1);
    }
    break;

  case 1: {  /* integer status code: negative values are failures */
    int *retval = (int *) returnvalue;
    if (*retval < 0) {
      fprintf(stderr,
              "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n",
              funcname, *retval);
      return(1);
    }
    break;
  }

  case 2:  /* allocation by a user routine: NULL means no memory */
    if (returnvalue == NULL) {
      fprintf(stderr,
              "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
              funcname);
      return(1);
    }
    break;
  }

  return(0);
}
GB_binop__isgt_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__isgt_int16 // A.*B function (eWiseMult): GB_AemultB__isgt_int16 // A*D function (colscale): GB_AxD__isgt_int16 // D*A function (rowscale): GB_DxB__isgt_int16 // C+=B function (dense accum): GB_Cdense_accumB__isgt_int16 // C+=b function (dense accum): GB_Cdense_accumb__isgt_int16 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isgt_int16 // C=scalar+B GB_bind1st__isgt_int16 // C=scalar+B' GB_bind1st_tran__isgt_int16 // C=A+scalar GB_bind2nd__isgt_int16 // C=A'+scalar GB_bind2nd_tran__isgt_int16 // C type: int16_t // A type: int16_t // B,b type: int16_t // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int16_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define 
GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x > y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISGT || GxB_NO_INT16 || GxB_NO_ISGT_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__isgt_int16 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__isgt_int16 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__isgt_int16 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__isgt_int16 ( GrB_Matrix C, const GrB_Matrix A, 
bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *GB_RESTRICT Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__isgt_int16 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *GB_RESTRICT Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__isgt_int16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__isgt_int16 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT 
TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__isgt_int16 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int16_t bij = Bx [p] ; Cx [p] = (x > bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__isgt_int16 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int16_t aij = Ax [p] ; Cx [p] = (aij > y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; 
\ Cx [pC] = (x > aij) ; \ } GrB_Info GB_bind1st_tran__isgt_int16 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = Ax [pA] ; \ Cx [pC] = (aij > y) ; \ } GrB_Info GB_bind2nd_tran__isgt_int16 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
lock-unrelated.c
/* Copyright (c) 2015-2019, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Simone Atzeni (simone@cs.utah.edu), Joachim Protze (joachim.protze@tu-dresden.de), Jonas Hahnfeld (hahnfeld@itc.rwth-aachen.de), Ganesh Gopalakrishnan, Zvonimir Rakamaric, Dong H. Ahn, Gregory L. Lee, Ignacio Laguna, and Martin Schulz. LLNL-CODE-773957 All rights reserved. This file is part of Archer. For details, see https://pruners.github.io/archer. Please also read https://github.com/PRUNERS/archer/blob/master/LICENSE. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. 
DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
   INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
   SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
   STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
   OF THE POSSIBILITY OF SUCH DAMAGE.
*/

// RUN: %libarcher-compile-and-run-race | FileCheck %s
#include <omp.h>
#include <stdio.h>

// Archer regression test: the lock below is acquired and released BEFORE the
// unprotected increment, so it establishes no ordering between the two
// increments of `var`.  The data race on `var` is therefore INTENTIONAL, and
// the CHECK lines at the bottom require ThreadSanitizer to report it —
// do not "fix" the race.
int main(int argc, char* argv[]) {
  int var = 0;

  omp_lock_t lock;
  omp_init_lock(&lock);

#pragma omp parallel num_threads(2) shared(var)
  {
    omp_set_lock(&lock);
    // Dummy locking: unrelated to the increment below, so it must NOT
    // suppress the race report.
    omp_unset_lock(&lock);
    var++;  // Racy write: both threads increment without holding the lock.
  }

  omp_destroy_lock(&lock);
  // NOTE(review): when the race manifests, var may end up != 2; the test's
  // pass/fail is driven by the FileCheck expectations below, not by `error`.
  int error = (var != 2);
  fprintf(stderr, "DONE\n");
  return error;
}

// CHECK: WARNING: ThreadSanitizer: data race
// CHECK: Write of size 4
// CHECK: #0 .omp_outlined.
// CHECK: Previous write of size 4
// CHECK: #0 .omp_outlined.
// CHECK: DONE
tree-pretty-print.c
/* Pretty formatting of GENERIC trees in C syntax. Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc. Adapted from c-pretty-print.c by Diego Novillo <dnovillo@redhat.com> This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "tree.h" #include "output.h" #include "diagnostic.h" #include "real.h" #include "hashtab.h" #include "tree-flow.h" #include "langhooks.h" #include "tree-iterator.h" #include "tree-chrec.h" #include "tree-pass.h" #include "fixed-value.h" #include "value-prof.h" #include "predict.h" /* Local functions, macros and variables. */ static const char *op_symbol (const_tree); static void pretty_print_string (pretty_printer *, const char*); static void newline_and_indent (pretty_printer *, int); static void maybe_init_pretty_print (FILE *); static void print_struct_decl (pretty_printer *, const_tree, int, int); static void do_niy (pretty_printer *, const_tree); #define INDENT(SPACE) do { \ int i; for (i = 0; i<SPACE; i++) pp_space (buffer); } while (0) #define NIY do_niy(buffer,node) static pretty_printer buffer; static int initialized = 0; /* Try to print something for an unknown tree code. 
*/ static void do_niy (pretty_printer *buffer, const_tree node) { int i, len; pp_string (buffer, "<<< Unknown tree: "); pp_string (buffer, tree_code_name[(int) TREE_CODE (node)]); if (EXPR_P (node)) { len = TREE_OPERAND_LENGTH (node); for (i = 0; i < len; ++i) { newline_and_indent (buffer, 2); dump_generic_node (buffer, TREE_OPERAND (node, i), 2, 0, false); } } pp_string (buffer, " >>>\n"); } /* Debugging function to print out a generic expression. */ void debug_generic_expr (tree t) { print_generic_expr (stderr, t, TDF_VOPS|TDF_MEMSYMS); fprintf (stderr, "\n"); } /* Debugging function to print out a generic statement. */ void debug_generic_stmt (tree t) { print_generic_stmt (stderr, t, TDF_VOPS|TDF_MEMSYMS); fprintf (stderr, "\n"); } /* Debugging function to print out a chain of trees . */ void debug_tree_chain (tree t) { struct pointer_set_t *seen = pointer_set_create (); while (t) { print_generic_expr (stderr, t, TDF_VOPS|TDF_MEMSYMS|TDF_UID); fprintf (stderr, " "); t = TREE_CHAIN (t); if (pointer_set_insert (seen, t)) { fprintf (stderr, "... [cycled back to "); print_generic_expr (stderr, t, TDF_VOPS|TDF_MEMSYMS|TDF_UID); fprintf (stderr, "]"); break; } } fprintf (stderr, "\n"); pointer_set_destroy (seen); } /* Prints declaration DECL to the FILE with details specified by FLAGS. */ void print_generic_decl (FILE *file, tree decl, int flags) { maybe_init_pretty_print (file); print_declaration (&buffer, decl, 2, flags); pp_write_text_to_stream (&buffer); } /* Print tree T, and its successors, on file FILE. FLAGS specifies details to show in the dump. See TDF_* in tree-pass.h. */ void print_generic_stmt (FILE *file, tree t, int flags) { maybe_init_pretty_print (file); dump_generic_node (&buffer, t, 0, flags, true); pp_flush (&buffer); } /* Print tree T, and its successors, on file FILE. FLAGS specifies details to show in the dump. See TDF_* in tree-pass.h. The output is indented by INDENT spaces. 
*/ void print_generic_stmt_indented (FILE *file, tree t, int flags, int indent) { int i; maybe_init_pretty_print (file); for (i = 0; i < indent; i++) pp_space (&buffer); dump_generic_node (&buffer, t, indent, flags, true); pp_flush (&buffer); } /* Print a single expression T on file FILE. FLAGS specifies details to show in the dump. See TDF_* in tree-pass.h. */ void print_generic_expr (FILE *file, tree t, int flags) { maybe_init_pretty_print (file); dump_generic_node (&buffer, t, 0, flags, false); } /* Dump the name of a _DECL node and its DECL_UID if TDF_UID is set in FLAGS. */ static void dump_decl_name (pretty_printer *buffer, tree node, int flags) { if (DECL_NAME (node)) { if ((flags & TDF_ASMNAME) && DECL_ASSEMBLER_NAME_SET_P (node)) pp_tree_identifier (buffer, DECL_ASSEMBLER_NAME (node)); else pp_tree_identifier (buffer, DECL_NAME (node)); } if ((flags & TDF_UID) || DECL_NAME (node) == NULL_TREE) { if (TREE_CODE (node) == LABEL_DECL && LABEL_DECL_UID (node) != -1) pp_printf (buffer, "L.%d", (int) LABEL_DECL_UID (node)); else if (TREE_CODE (node) == DEBUG_EXPR_DECL) { if (flags & TDF_NOUID) pp_string (buffer, "D#xxxx"); else pp_printf (buffer, "D#%i", DEBUG_TEMP_UID (node)); } else { char c = TREE_CODE (node) == CONST_DECL ? 'C' : 'D'; if (flags & TDF_NOUID) pp_printf (buffer, "%c.xxxx", c); else pp_printf (buffer, "%c.%u", c, DECL_UID (node)); } } } /* Like the above, but used for pretty printing function calls. */ static void dump_function_name (pretty_printer *buffer, tree node, int flags) { if (TREE_CODE (node) == NOP_EXPR) node = TREE_OPERAND (node, 0); if (DECL_NAME (node) && (flags & TDF_ASMNAME) == 0) pp_string (buffer, lang_hooks.decl_printable_name (node, 1)); else dump_decl_name (buffer, node, flags); } /* Dump a function declaration. NODE is the FUNCTION_TYPE. BUFFER, SPC and FLAGS are as in dump_generic_node. 
*/

static void
dump_function_declaration (pretty_printer *buffer, tree node,
			   int spc, int flags)
{
  bool printed_any = false;
  tree arg = TYPE_ARG_TYPES (node);

  pp_space (buffer);
  pp_character (buffer, '(');

  /* Print the argument types.  The last element in the list is a
     VOID_TYPE.  The following avoids printing the last element.  */
  while (arg && TREE_CHAIN (arg) && arg != error_mark_node)
    {
      printed_any = true;
      dump_generic_node (buffer, TREE_VALUE (arg), spc, flags, false);
      arg = TREE_CHAIN (arg);
      if (TREE_CHAIN (arg) && TREE_CODE (TREE_CHAIN (arg)) == TREE_LIST)
	{
	  pp_character (buffer, ',');
	  pp_space (buffer);
	}
    }

  /* An empty argument list prints as "(void)".  */
  if (!printed_any)
    pp_string (buffer, "void");

  pp_character (buffer, ')');
}

/* Dump the domain associated with an array.  */

static void
dump_array_domain (pretty_printer *buffer, tree domain, int spc, int flags)
{
  pp_character (buffer, '[');
  if (domain)
    {
      tree lo = TYPE_MIN_VALUE (domain);
      tree hi = TYPE_MAX_VALUE (domain);

      /* A zero-based domain with a host-representable constant upper
	 bound prints as the element count; anything else as "lo:hi".  */
      if (lo && hi && integer_zerop (lo) && host_integerp (hi, 0))
	pp_wide_integer (buffer, TREE_INT_CST_LOW (hi) + 1);
      else
	{
	  if (lo)
	    dump_generic_node (buffer, lo, spc, flags, false);
	  pp_character (buffer, ':');
	  if (hi)
	    dump_generic_node (buffer, hi, spc, flags, false);
	}
    }
  else
    pp_string (buffer, "<unknown>");
  pp_character (buffer, ']');
}

/* Dump OpenMP clause CLAUSE.  BUFFER, CLAUSE, SPC and FLAGS are as in
   dump_generic_node. 
*/

static void
dump_omp_clause (pretty_printer *buffer, tree clause, int spc, int flags)
{
  const char *name;

  switch (OMP_CLAUSE_CODE (clause))
    {
    /* The data-sharing clauses all print as "name(DECL)": each case sets
       NAME and jumps to the shared printing code at print_remap.  */
    case OMP_CLAUSE_PRIVATE:
      name = "private";
      goto print_remap;
    case OMP_CLAUSE_SHARED:
      name = "shared";
      goto print_remap;
    case OMP_CLAUSE_FIRSTPRIVATE:
      name = "firstprivate";
      goto print_remap;
    case OMP_CLAUSE_LASTPRIVATE:
      name = "lastprivate";
      goto print_remap;
    case OMP_CLAUSE_COPYIN:
      name = "copyin";
      goto print_remap;
    case OMP_CLAUSE_COPYPRIVATE:
      name = "copyprivate";
      goto print_remap;
  print_remap:
      pp_string (buffer, name);
      pp_character (buffer, '(');
      dump_generic_node (buffer, OMP_CLAUSE_DECL (clause),
			 spc, flags, false);
      pp_character (buffer, ')');
      break;

    case OMP_CLAUSE_REDUCTION:
      /* Prints as "reduction(op:decl)".  */
      pp_string (buffer, "reduction(");
      pp_string (buffer, op_symbol_code (OMP_CLAUSE_REDUCTION_CODE (clause)));
      pp_character (buffer, ':');
      dump_generic_node (buffer, OMP_CLAUSE_DECL (clause),
			 spc, flags, false);
      pp_character (buffer, ')');
      break;

    case OMP_CLAUSE_IF:
      pp_string (buffer, "if(");
      dump_generic_node (buffer, OMP_CLAUSE_IF_EXPR (clause),
			 spc, flags, false);
      pp_character (buffer, ')');
      break;

    case OMP_CLAUSE_NUM_THREADS:
      pp_string (buffer, "num_threads(");
      dump_generic_node (buffer, OMP_CLAUSE_NUM_THREADS_EXPR (clause),
			 spc, flags, false);
      pp_character (buffer, ')');
      break;

    case OMP_CLAUSE_NOWAIT:
      pp_string (buffer, "nowait");
      break;

    case OMP_CLAUSE_ORDERED:
      pp_string (buffer, "ordered");
      break;

    case OMP_CLAUSE_DEFAULT:
      pp_string (buffer, "default(");
      switch (OMP_CLAUSE_DEFAULT_KIND (clause))
	{
	case OMP_CLAUSE_DEFAULT_UNSPECIFIED:
	  /* No kind given: print just "default()".  */
	  break;
	case OMP_CLAUSE_DEFAULT_SHARED:
	  pp_string (buffer, "shared");
	  break;
	case OMP_CLAUSE_DEFAULT_NONE:
	  pp_string (buffer, "none");
	  break;
	case OMP_CLAUSE_DEFAULT_PRIVATE:
	  pp_string (buffer, "private");
	  break;
	case OMP_CLAUSE_DEFAULT_FIRSTPRIVATE:
	  pp_string (buffer, "firstprivate");
	  break;
	default:
	  gcc_unreachable ();
	}
      pp_character (buffer, ')');
      break;

    case OMP_CLAUSE_SCHEDULE:
      pp_string (buffer, "schedule(");
      switch (OMP_CLAUSE_SCHEDULE_KIND (clause))
	{
	case OMP_CLAUSE_SCHEDULE_STATIC:
	  pp_string (buffer, "static");
	  break;
	case OMP_CLAUSE_SCHEDULE_DYNAMIC:
	  pp_string (buffer, "dynamic");
	  break;
	case OMP_CLAUSE_SCHEDULE_GUIDED:
	  pp_string (buffer, "guided");
	  break;
	case OMP_CLAUSE_SCHEDULE_RUNTIME:
	  pp_string (buffer, "runtime");
	  break;
	case OMP_CLAUSE_SCHEDULE_AUTO:
	  pp_string (buffer, "auto");
	  break;
	default:
	  gcc_unreachable ();
	}
      /* An optional chunk size follows the schedule kind.  */
      if (OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (clause))
	{
	  pp_character (buffer, ',');
	  dump_generic_node (buffer,
			     OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (clause),
			     spc, flags, false);
	}
      pp_character (buffer, ')');
      break;

    case OMP_CLAUSE_UNTIED:
      pp_string (buffer, "untied");
      break;

    case OMP_CLAUSE_COLLAPSE:
      pp_string (buffer, "collapse(");
      dump_generic_node (buffer, OMP_CLAUSE_COLLAPSE_EXPR (clause),
			 spc, flags, false);
      pp_character (buffer, ')');
      break;

    default:
      /* Should never happen.  */
      dump_generic_node (buffer, clause, spc, flags, false);
      break;
    }
}

/* Dump the list of OpenMP clauses.  BUFFER, SPC and FLAGS are as in
   dump_generic_node.  */

void
dump_omp_clauses (pretty_printer *buffer, tree clause, int spc, int flags)
{
  if (clause == NULL)
    return;

  pp_space (buffer);
  while (1)
    {
      dump_omp_clause (buffer, clause, spc, flags);
      clause = OMP_CLAUSE_CHAIN (clause);
      if (clause == NULL)
	return;
      /* Clauses are separated by a single space.  */
      pp_space (buffer);
    }
}

/* Dump location LOC to BUFFER.  */

static void
dump_location (pretty_printer *buffer, location_t loc)
{
  expanded_location xloc = expand_location (loc);

  /* Printed as "[file : line] "; the file part is omitted when unknown.  */
  pp_character (buffer, '[');
  if (xloc.file)
    {
      pp_string (buffer, xloc.file);
      pp_string (buffer, " : ");
    }
  pp_decimal_int (buffer, xloc.line);
  pp_string (buffer, "] ");
}

/* Dump lexical block BLOCK.  BUFFER, SPC and FLAGS are as in
   dump_generic_node. 
*/ static void dump_block_node (pretty_printer *buffer, tree block, int spc, int flags) { tree t; pp_printf (buffer, "BLOCK #%d ", BLOCK_NUMBER (block)); if (flags & TDF_ADDRESS) pp_printf (buffer, "[%p] ", (void *) block); if (BLOCK_ABSTRACT (block)) pp_string (buffer, "[abstract] "); if (TREE_ASM_WRITTEN (block)) pp_string (buffer, "[written] "); if (flags & TDF_SLIM) return; if (BLOCK_SOURCE_LOCATION (block)) dump_location (buffer, BLOCK_SOURCE_LOCATION (block)); newline_and_indent (buffer, spc + 2); if (BLOCK_SUPERCONTEXT (block)) { pp_string (buffer, "SUPERCONTEXT: "); dump_generic_node (buffer, BLOCK_SUPERCONTEXT (block), 0, flags | TDF_SLIM, false); newline_and_indent (buffer, spc + 2); } if (BLOCK_SUBBLOCKS (block)) { pp_string (buffer, "SUBBLOCKS: "); for (t = BLOCK_SUBBLOCKS (block); t; t = BLOCK_CHAIN (t)) { dump_generic_node (buffer, t, 0, flags | TDF_SLIM, false); pp_string (buffer, " "); } newline_and_indent (buffer, spc + 2); } if (BLOCK_CHAIN (block)) { pp_string (buffer, "SIBLINGS: "); for (t = BLOCK_CHAIN (block); t; t = BLOCK_CHAIN (t)) { dump_generic_node (buffer, t, 0, flags | TDF_SLIM, false); pp_string (buffer, " "); } newline_and_indent (buffer, spc + 2); } if (BLOCK_VARS (block)) { pp_string (buffer, "VARS: "); for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t)) { dump_generic_node (buffer, t, 0, flags, false); pp_string (buffer, " "); } newline_and_indent (buffer, spc + 2); } if (VEC_length (tree, BLOCK_NONLOCALIZED_VARS (block)) > 0) { unsigned i; VEC(tree,gc) *nlv = BLOCK_NONLOCALIZED_VARS (block); pp_string (buffer, "NONLOCALIZED_VARS: "); for (i = 0; VEC_iterate (tree, nlv, i, t); i++) { dump_generic_node (buffer, t, 0, flags, false); pp_string (buffer, " "); } newline_and_indent (buffer, spc + 2); } if (BLOCK_ABSTRACT_ORIGIN (block)) { pp_string (buffer, "ABSTRACT_ORIGIN: "); dump_generic_node (buffer, BLOCK_ABSTRACT_ORIGIN (block), 0, flags | TDF_SLIM, false); newline_and_indent (buffer, spc + 2); } if (BLOCK_FRAGMENT_ORIGIN (block)) 
{ pp_string (buffer, "FRAGMENT_ORIGIN: "); dump_generic_node (buffer, BLOCK_FRAGMENT_ORIGIN (block), 0, flags | TDF_SLIM, false); newline_and_indent (buffer, spc + 2); } if (BLOCK_FRAGMENT_CHAIN (block)) { pp_string (buffer, "FRAGMENT_CHAIN: "); for (t = BLOCK_FRAGMENT_CHAIN (block); t; t = BLOCK_FRAGMENT_CHAIN (t)) { dump_generic_node (buffer, t, 0, flags | TDF_SLIM, false); pp_string (buffer, " "); } newline_and_indent (buffer, spc + 2); } } /* Dump the node NODE on the pretty_printer BUFFER, SPC spaces of indent. FLAGS specifies details to show in the dump (see TDF_* in tree-pass.h). If IS_STMT is true, the object printed is considered to be a statement and it is terminated by ';' if appropriate. */ int dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags, bool is_stmt) { tree type; tree op0, op1; const char *str; bool is_expr; if (node == NULL_TREE) return spc; is_expr = EXPR_P (node); if (is_stmt && (flags & TDF_STMTADDR)) pp_printf (buffer, "<&%p> ", (void *)node); if ((flags & TDF_LINENO) && EXPR_HAS_LOCATION (node)) dump_location (buffer, EXPR_LOCATION (node)); switch (TREE_CODE (node)) { case ERROR_MARK: pp_string (buffer, "<<< error >>>"); break; case IDENTIFIER_NODE: pp_tree_identifier (buffer, node); break; case TREE_LIST: while (node && node != error_mark_node) { if (TREE_PURPOSE (node)) { dump_generic_node (buffer, TREE_PURPOSE (node), spc, flags, false); pp_space (buffer); } dump_generic_node (buffer, TREE_VALUE (node), spc, flags, false); node = TREE_CHAIN (node); if (node && TREE_CODE (node) == TREE_LIST) { pp_character (buffer, ','); pp_space (buffer); } } break; case TREE_BINFO: dump_generic_node (buffer, BINFO_TYPE (node), spc, flags, false); break; case TREE_VEC: { size_t i; if (TREE_VEC_LENGTH (node) > 0) { size_t len = TREE_VEC_LENGTH (node); for (i = 0; i < len - 1; i++) { dump_generic_node (buffer, TREE_VEC_ELT (node, i), spc, flags, false); pp_character (buffer, ','); pp_space (buffer); } dump_generic_node (buffer, 
TREE_VEC_ELT (node, len - 1), spc, flags, false); } } break; case VOID_TYPE: case INTEGER_TYPE: case REAL_TYPE: case FIXED_POINT_TYPE: case COMPLEX_TYPE: case VECTOR_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE: { unsigned int quals = TYPE_QUALS (node); enum tree_code_class tclass; if (quals & TYPE_QUAL_CONST) pp_string (buffer, "const "); else if (quals & TYPE_QUAL_VOLATILE) pp_string (buffer, "volatile "); else if (quals & TYPE_QUAL_RESTRICT) pp_string (buffer, "restrict "); if (!ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (node))) { pp_string (buffer, "<address-space-"); pp_decimal_int (buffer, TYPE_ADDR_SPACE (node)); pp_string (buffer, "> "); } tclass = TREE_CODE_CLASS (TREE_CODE (node)); if (tclass == tcc_declaration) { if (DECL_NAME (node)) dump_decl_name (buffer, node, flags); else pp_string (buffer, "<unnamed type decl>"); } else if (tclass == tcc_type) { if (TYPE_NAME (node)) { if (TREE_CODE (TYPE_NAME (node)) == IDENTIFIER_NODE) pp_tree_identifier (buffer, TYPE_NAME (node)); else if (TREE_CODE (TYPE_NAME (node)) == TYPE_DECL && DECL_NAME (TYPE_NAME (node))) dump_decl_name (buffer, TYPE_NAME (node), flags); else pp_string (buffer, "<unnamed type>"); } else if (TREE_CODE (node) == VECTOR_TYPE) { pp_string (buffer, "vector "); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); } else if (TREE_CODE (node) == INTEGER_TYPE) { pp_string (buffer, (TYPE_UNSIGNED (node) ? "<unnamed-unsigned:" : "<unnamed-signed:")); pp_decimal_int (buffer, TYPE_PRECISION (node)); pp_string (buffer, ">"); } else if (TREE_CODE (node) == COMPLEX_TYPE) { pp_string (buffer, "__complex__ "); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); } else if (TREE_CODE (node) == REAL_TYPE) { pp_string (buffer, "<float:"); pp_decimal_int (buffer, TYPE_PRECISION (node)); pp_string (buffer, ">"); } else if (TREE_CODE (node) == FIXED_POINT_TYPE) { pp_string (buffer, "<fixed-point-"); pp_string (buffer, TYPE_SATURATING (node) ? 
"sat:" : "nonsat:"); pp_decimal_int (buffer, TYPE_PRECISION (node)); pp_string (buffer, ">"); } else pp_string (buffer, "<unnamed type>"); } break; } case POINTER_TYPE: case REFERENCE_TYPE: str = (TREE_CODE (node) == POINTER_TYPE ? "*" : "&"); if (TREE_TYPE (node) == NULL) { pp_string (buffer, str); pp_string (buffer, "<null type>"); } else if (TREE_CODE (TREE_TYPE (node)) == FUNCTION_TYPE) { tree fnode = TREE_TYPE (node); dump_generic_node (buffer, TREE_TYPE (fnode), spc, flags, false); pp_space (buffer); pp_character (buffer, '('); pp_string (buffer, str); if (TYPE_NAME (node) && DECL_NAME (TYPE_NAME (node))) dump_decl_name (buffer, TYPE_NAME (node), flags); else pp_printf (buffer, "<T%x>", TYPE_UID (node)); pp_character (buffer, ')'); dump_function_declaration (buffer, fnode, spc, flags); } else { unsigned int quals = TYPE_QUALS (node); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); pp_space (buffer); pp_string (buffer, str); if (quals & TYPE_QUAL_CONST) pp_string (buffer, " const"); if (quals & TYPE_QUAL_VOLATILE) pp_string (buffer, " volatile"); if (quals & TYPE_QUAL_RESTRICT) pp_string (buffer, " restrict"); if (!ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (node))) { pp_string (buffer, " <address-space-"); pp_decimal_int (buffer, TYPE_ADDR_SPACE (node)); pp_string (buffer, ">"); } if (TYPE_REF_CAN_ALIAS_ALL (node)) pp_string (buffer, " {ref-all}"); } break; case OFFSET_TYPE: NIY; break; case TARGET_MEM_REF: { const char *sep = ""; tree tmp; pp_string (buffer, "MEM["); tmp = TMR_SYMBOL (node); if (tmp) { pp_string (buffer, sep); sep = ", "; pp_string (buffer, "symbol: "); dump_generic_node (buffer, tmp, spc, flags, false); } tmp = TMR_BASE (node); if (tmp) { pp_string (buffer, sep); sep = ", "; pp_string (buffer, "base: "); dump_generic_node (buffer, tmp, spc, flags, false); } tmp = TMR_INDEX (node); if (tmp) { pp_string (buffer, sep); sep = ", "; pp_string (buffer, "index: "); dump_generic_node (buffer, tmp, spc, flags, false); } tmp = TMR_STEP 
(node); if (tmp) { pp_string (buffer, sep); sep = ", "; pp_string (buffer, "step: "); dump_generic_node (buffer, tmp, spc, flags, false); } tmp = TMR_OFFSET (node); if (tmp) { pp_string (buffer, sep); sep = ", "; pp_string (buffer, "offset: "); dump_generic_node (buffer, tmp, spc, flags, false); } pp_string (buffer, "]"); if (flags & TDF_DETAILS) { pp_string (buffer, "{"); dump_generic_node (buffer, TMR_ORIGINAL (node), spc, flags, false); pp_string (buffer, "}"); } } break; case ARRAY_TYPE: { tree tmp; /* Print the innermost component type. */ for (tmp = TREE_TYPE (node); TREE_CODE (tmp) == ARRAY_TYPE; tmp = TREE_TYPE (tmp)) ; dump_generic_node (buffer, tmp, spc, flags, false); /* Print the dimensions. */ for (tmp = node; TREE_CODE (tmp) == ARRAY_TYPE; tmp = TREE_TYPE (tmp)) dump_array_domain (buffer, TYPE_DOMAIN (tmp), spc, flags); break; } case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: { unsigned int quals = TYPE_QUALS (node); if (quals & TYPE_QUAL_CONST) pp_string (buffer, "const "); if (quals & TYPE_QUAL_VOLATILE) pp_string (buffer, "volatile "); /* Print the name of the structure. */ if (TREE_CODE (node) == RECORD_TYPE) pp_string (buffer, "struct "); else if (TREE_CODE (node) == UNION_TYPE) pp_string (buffer, "union "); if (TYPE_NAME (node)) dump_generic_node (buffer, TYPE_NAME (node), spc, flags, false); else if (!(flags & TDF_SLIM)) /* FIXME: If we eliminate the 'else' above and attempt to show the fields for named types, we may get stuck following a cycle of pointers to structs. The alleged self-reference check in print_struct_decl will not detect cycles involving more than one pointer or struct type. */ print_struct_decl (buffer, node, spc, flags); break; } case LANG_TYPE: NIY; break; case INTEGER_CST: if (TREE_CODE (TREE_TYPE (node)) == POINTER_TYPE) { /* In the case of a pointer, one may want to divide by the size of the pointed-to type. Unfortunately, this not straightforward. 
The C front-end maps expressions (int *) 5 int *p; (p + 5) in such a way that the two INTEGER_CST nodes for "5" have different values but identical types. In the latter case, the 5 is multiplied by sizeof (int) in c-common.c (pointer_int_sum) to convert it to a byte address, and yet the type of the node is left unchanged. Argh. What is consistent though is that the number value corresponds to bytes (UNITS) offset. NB: Neither of the following divisors can be trivially used to recover the original literal: TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (node))) TYPE_PRECISION (TREE_TYPE (TREE_TYPE (node))) */ pp_wide_integer (buffer, TREE_INT_CST_LOW (node)); pp_string (buffer, "B"); /* pseudo-unit */ } else if (! host_integerp (node, 0)) { tree val = node; unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (val); HOST_WIDE_INT high = TREE_INT_CST_HIGH (val); if (tree_int_cst_sgn (val) < 0) { pp_character (buffer, '-'); high = ~high + !low; low = -low; } /* Would "%x%0*x" or "%x%*0x" get zero-padding on all systems? */ sprintf (pp_buffer (buffer)->digit_buffer, HOST_WIDE_INT_PRINT_DOUBLE_HEX, (unsigned HOST_WIDE_INT) high, low); pp_string (buffer, pp_buffer (buffer)->digit_buffer); } else pp_wide_integer (buffer, TREE_INT_CST_LOW (node)); break; case REAL_CST: /* Code copied from print_node. */ { REAL_VALUE_TYPE d; if (TREE_OVERFLOW (node)) pp_string (buffer, " overflow"); #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC) d = TREE_REAL_CST (node); if (REAL_VALUE_ISINF (d)) pp_string (buffer, REAL_VALUE_NEGATIVE (d) ? 
" -Inf" : " Inf"); else if (REAL_VALUE_ISNAN (d)) pp_string (buffer, " Nan"); else { char string[100]; real_to_decimal (string, &d, sizeof (string), 0, 1); pp_string (buffer, string); } #else { HOST_WIDE_INT i; unsigned char *p = (unsigned char *) &TREE_REAL_CST (node); pp_string (buffer, "0x"); for (i = 0; i < sizeof TREE_REAL_CST (node); i++) output_formatted_integer (buffer, "%02x", *p++); } #endif break; } case FIXED_CST: { char string[100]; fixed_to_decimal (string, TREE_FIXED_CST_PTR (node), sizeof (string)); pp_string (buffer, string); break; } case COMPLEX_CST: pp_string (buffer, "__complex__ ("); dump_generic_node (buffer, TREE_REALPART (node), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_IMAGPART (node), spc, flags, false); pp_string (buffer, ")"); break; case STRING_CST: pp_string (buffer, "\""); pretty_print_string (buffer, TREE_STRING_POINTER (node)); pp_string (buffer, "\""); break; case VECTOR_CST: { tree elt; pp_string (buffer, "{ "); for (elt = TREE_VECTOR_CST_ELTS (node); elt; elt = TREE_CHAIN (elt)) { dump_generic_node (buffer, TREE_VALUE (elt), spc, flags, false); if (TREE_CHAIN (elt)) pp_string (buffer, ", "); } pp_string (buffer, " }"); } break; case FUNCTION_TYPE: case METHOD_TYPE: dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); pp_space (buffer); if (TREE_CODE (node) == METHOD_TYPE) { if (TYPE_METHOD_BASETYPE (node)) dump_decl_name (buffer, TYPE_NAME (TYPE_METHOD_BASETYPE (node)), flags); else pp_string (buffer, "<null method basetype>"); pp_string (buffer, "::"); } if (TYPE_NAME (node) && DECL_NAME (TYPE_NAME (node))) dump_decl_name (buffer, TYPE_NAME (node), flags); else pp_printf (buffer, "<T%x>", TYPE_UID (node)); dump_function_declaration (buffer, node, spc, flags); break; case FUNCTION_DECL: case CONST_DECL: dump_decl_name (buffer, node, flags); break; case LABEL_DECL: if (DECL_NAME (node)) dump_decl_name (buffer, node, flags); else if (LABEL_DECL_UID (node) != -1) pp_printf (buffer, 
"<L%d>", (int) LABEL_DECL_UID (node)); else { if (flags & TDF_NOUID) pp_string (buffer, "<D.xxxx>"); else pp_printf (buffer, "<D.%u>", DECL_UID (node)); } break; case TYPE_DECL: if (DECL_IS_BUILTIN (node)) { /* Don't print the declaration of built-in types. */ break; } if (DECL_NAME (node)) dump_decl_name (buffer, node, flags); else { if ((TREE_CODE (TREE_TYPE (node)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (node)) == UNION_TYPE) && TYPE_METHODS (TREE_TYPE (node))) { /* The type is a c++ class: all structures have at least 4 methods. */ pp_string (buffer, "class "); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); } else { pp_string (buffer, (TREE_CODE (TREE_TYPE (node)) == UNION_TYPE ? "union" : "struct ")); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); } } break; case VAR_DECL: case PARM_DECL: case FIELD_DECL: case DEBUG_EXPR_DECL: case NAMESPACE_DECL: dump_decl_name (buffer, node, flags); break; case RESULT_DECL: pp_string (buffer, "<retval>"); break; case COMPONENT_REF: op0 = TREE_OPERAND (node, 0); str = "."; if (op0 && TREE_CODE (op0) == INDIRECT_REF) { op0 = TREE_OPERAND (op0, 0); str = "->"; } if (op_prio (op0) < op_prio (node)) pp_character (buffer, '('); dump_generic_node (buffer, op0, spc, flags, false); if (op_prio (op0) < op_prio (node)) pp_character (buffer, ')'); pp_string (buffer, str); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); op0 = component_ref_field_offset (node); if (op0 && TREE_CODE (op0) != INTEGER_CST) { pp_string (buffer, "{off: "); dump_generic_node (buffer, op0, spc, flags, false); pp_character (buffer, '}'); } break; case BIT_FIELD_REF: pp_string (buffer, "BIT_FIELD_REF <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); pp_string (buffer, ">"); break; case 
ARRAY_REF: case ARRAY_RANGE_REF: op0 = TREE_OPERAND (node, 0); if (op_prio (op0) < op_prio (node)) pp_character (buffer, '('); dump_generic_node (buffer, op0, spc, flags, false); if (op_prio (op0) < op_prio (node)) pp_character (buffer, ')'); pp_character (buffer, '['); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); if (TREE_CODE (node) == ARRAY_RANGE_REF) pp_string (buffer, " ..."); pp_character (buffer, ']'); op0 = array_ref_low_bound (node); op1 = array_ref_element_size (node); if (!integer_zerop (op0) || TREE_OPERAND (node, 2) || TREE_OPERAND (node, 3)) { pp_string (buffer, "{lb: "); dump_generic_node (buffer, op0, spc, flags, false); pp_string (buffer, " sz: "); dump_generic_node (buffer, op1, spc, flags, false); pp_character (buffer, '}'); } break; case CONSTRUCTOR: { unsigned HOST_WIDE_INT ix; tree field, val; bool is_struct_init = FALSE; pp_character (buffer, '{'); if (TREE_CODE (TREE_TYPE (node)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (node)) == UNION_TYPE) is_struct_init = TRUE; FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (node), ix, field, val) { if (field && is_struct_init) { pp_character (buffer, '.'); dump_generic_node (buffer, field, spc, flags, false); pp_string (buffer, "="); } if (val && TREE_CODE (val) == ADDR_EXPR) if (TREE_CODE (TREE_OPERAND (val, 0)) == FUNCTION_DECL) val = TREE_OPERAND (val, 0); if (val && TREE_CODE (val) == FUNCTION_DECL) dump_decl_name (buffer, val, flags); else dump_generic_node (buffer, val, spc, flags, false); if (ix != VEC_length (constructor_elt, CONSTRUCTOR_ELTS (node)) - 1) { pp_character (buffer, ','); pp_space (buffer); } } pp_character (buffer, '}'); } break; case COMPOUND_EXPR: { tree *tp; if (flags & TDF_SLIM) { pp_string (buffer, "<COMPOUND_EXPR>"); break; } dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, !(flags & TDF_SLIM)); if (flags & TDF_SLIM) newline_and_indent (buffer, spc); else { pp_character (buffer, ','); pp_space (buffer); } for (tp = &TREE_OPERAND (node, 1); 
TREE_CODE (*tp) == COMPOUND_EXPR; tp = &TREE_OPERAND (*tp, 1)) { dump_generic_node (buffer, TREE_OPERAND (*tp, 0), spc, flags, !(flags & TDF_SLIM)); if (flags & TDF_SLIM) newline_and_indent (buffer, spc); else { pp_character (buffer, ','); pp_space (buffer); } } dump_generic_node (buffer, *tp, spc, flags, !(flags & TDF_SLIM)); } break; case STATEMENT_LIST: { tree_stmt_iterator si; bool first = true; if (flags & TDF_SLIM) { pp_string (buffer, "<STATEMENT_LIST>"); break; } for (si = tsi_start (node); !tsi_end_p (si); tsi_next (&si)) { if (!first) newline_and_indent (buffer, spc); else first = false; dump_generic_node (buffer, tsi_stmt (si), spc, flags, true); } } break; case MODIFY_EXPR: case INIT_EXPR: dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_space (buffer); pp_character (buffer, '='); if (TREE_CODE (node) == MODIFY_EXPR && MOVE_NONTEMPORAL (node)) pp_string (buffer, "{nt}"); pp_space (buffer); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); break; case TARGET_EXPR: pp_string (buffer, "TARGET_EXPR <"); dump_generic_node (buffer, TARGET_EXPR_SLOT (node), spc, flags, false); pp_character (buffer, ','); pp_space (buffer); dump_generic_node (buffer, TARGET_EXPR_INITIAL (node), spc, flags, false); pp_character (buffer, '>'); break; case DECL_EXPR: print_declaration (buffer, DECL_EXPR_DECL (node), spc, flags); is_stmt = false; break; case COND_EXPR: if (TREE_TYPE (node) == NULL || TREE_TYPE (node) == void_type_node) { pp_string (buffer, "if ("); dump_generic_node (buffer, COND_EXPR_COND (node), spc, flags, false); pp_character (buffer, ')'); /* The lowered cond_exprs should always be printed in full. 
*/ if (COND_EXPR_THEN (node) && (IS_EMPTY_STMT (COND_EXPR_THEN (node)) || TREE_CODE (COND_EXPR_THEN (node)) == GOTO_EXPR) && COND_EXPR_ELSE (node) && (IS_EMPTY_STMT (COND_EXPR_ELSE (node)) || TREE_CODE (COND_EXPR_ELSE (node)) == GOTO_EXPR)) { pp_space (buffer); dump_generic_node (buffer, COND_EXPR_THEN (node), 0, flags, true); if (!IS_EMPTY_STMT (COND_EXPR_ELSE (node))) { pp_string (buffer, " else "); dump_generic_node (buffer, COND_EXPR_ELSE (node), 0, flags, true); } } else if (!(flags & TDF_SLIM)) { /* Output COND_EXPR_THEN. */ if (COND_EXPR_THEN (node)) { newline_and_indent (buffer, spc+2); pp_character (buffer, '{'); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, COND_EXPR_THEN (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_character (buffer, '}'); } /* Output COND_EXPR_ELSE. */ if (COND_EXPR_ELSE (node) && !IS_EMPTY_STMT (COND_EXPR_ELSE (node))) { newline_and_indent (buffer, spc); pp_string (buffer, "else"); newline_and_indent (buffer, spc+2); pp_character (buffer, '{'); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, COND_EXPR_ELSE (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_character (buffer, '}'); } } is_expr = false; } else { dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_space (buffer); pp_character (buffer, '?'); pp_space (buffer); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_space (buffer); pp_character (buffer, ':'); pp_space (buffer); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); } break; case BIND_EXPR: pp_character (buffer, '{'); if (!(flags & TDF_SLIM)) { if (BIND_EXPR_VARS (node)) { pp_newline (buffer); for (op0 = BIND_EXPR_VARS (node); op0; op0 = TREE_CHAIN (op0)) { print_declaration (buffer, op0, spc+2, flags); pp_newline (buffer); } } newline_and_indent (buffer, spc+2); dump_generic_node (buffer, BIND_EXPR_BODY (node), spc+2, flags, true); newline_and_indent (buffer, spc); pp_character 
(buffer, '}'); } is_expr = false; break; case CALL_EXPR: print_call_name (buffer, CALL_EXPR_FN (node), flags); /* Print parameters. */ pp_space (buffer); pp_character (buffer, '('); { tree arg; call_expr_arg_iterator iter; FOR_EACH_CALL_EXPR_ARG (arg, iter, node) { dump_generic_node (buffer, arg, spc, flags, false); if (more_call_expr_args_p (&iter)) { pp_character (buffer, ','); pp_space (buffer); } } } if (CALL_EXPR_VA_ARG_PACK (node)) { if (call_expr_nargs (node) > 0) { pp_character (buffer, ','); pp_space (buffer); } pp_string (buffer, "__builtin_va_arg_pack ()"); } pp_character (buffer, ')'); op1 = CALL_EXPR_STATIC_CHAIN (node); if (op1) { pp_string (buffer, " [static-chain: "); dump_generic_node (buffer, op1, spc, flags, false); pp_character (buffer, ']'); } if (CALL_EXPR_RETURN_SLOT_OPT (node)) pp_string (buffer, " [return slot optimization]"); if (CALL_EXPR_TAILCALL (node)) pp_string (buffer, " [tail call]"); break; case WITH_CLEANUP_EXPR: NIY; break; case CLEANUP_POINT_EXPR: pp_string (buffer, "<<cleanup_point "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">>"); break; case PLACEHOLDER_EXPR: pp_string (buffer, "<PLACEHOLDER_EXPR "); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); pp_character (buffer, '>'); break; /* Binary arithmetic and logic expressions. 
*/ case WIDEN_SUM_EXPR: case WIDEN_MULT_EXPR: case MULT_EXPR: case PLUS_EXPR: case POINTER_PLUS_EXPR: case MINUS_EXPR: case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case TRUNC_MOD_EXPR: case CEIL_MOD_EXPR: case FLOOR_MOD_EXPR: case ROUND_MOD_EXPR: case RDIV_EXPR: case EXACT_DIV_EXPR: case LSHIFT_EXPR: case RSHIFT_EXPR: case LROTATE_EXPR: case RROTATE_EXPR: case VEC_LSHIFT_EXPR: case VEC_RSHIFT_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: case BIT_AND_EXPR: case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: case TRUTH_AND_EXPR: case TRUTH_OR_EXPR: case TRUTH_XOR_EXPR: case LT_EXPR: case LE_EXPR: case GT_EXPR: case GE_EXPR: case EQ_EXPR: case NE_EXPR: case UNLT_EXPR: case UNLE_EXPR: case UNGT_EXPR: case UNGE_EXPR: case UNEQ_EXPR: case LTGT_EXPR: case ORDERED_EXPR: case UNORDERED_EXPR: { const char *op = op_symbol (node); op0 = TREE_OPERAND (node, 0); op1 = TREE_OPERAND (node, 1); /* When the operands are expressions with less priority, keep semantics of the tree representation. */ if (op_prio (op0) <= op_prio (node)) { pp_character (buffer, '('); dump_generic_node (buffer, op0, spc, flags, false); pp_character (buffer, ')'); } else dump_generic_node (buffer, op0, spc, flags, false); pp_space (buffer); pp_string (buffer, op); pp_space (buffer); /* When the operands are expressions with less priority, keep semantics of the tree representation. */ if (op_prio (op1) <= op_prio (node)) { pp_character (buffer, '('); dump_generic_node (buffer, op1, spc, flags, false); pp_character (buffer, ')'); } else dump_generic_node (buffer, op1, spc, flags, false); } break; /* Unary arithmetic and logic expressions. 
*/ case NEGATE_EXPR: case BIT_NOT_EXPR: case TRUTH_NOT_EXPR: case ADDR_EXPR: case PREDECREMENT_EXPR: case PREINCREMENT_EXPR: case ALIGN_INDIRECT_REF: case MISALIGNED_INDIRECT_REF: case INDIRECT_REF: if (TREE_CODE (node) == ADDR_EXPR && (TREE_CODE (TREE_OPERAND (node, 0)) == STRING_CST || TREE_CODE (TREE_OPERAND (node, 0)) == FUNCTION_DECL)) ; /* Do not output '&' for strings and function pointers. */ else pp_string (buffer, op_symbol (node)); if (op_prio (TREE_OPERAND (node, 0)) < op_prio (node)) { pp_character (buffer, '('); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, ')'); } else dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); if (TREE_CODE (node) == MISALIGNED_INDIRECT_REF) { pp_string (buffer, "{misalignment: "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_character (buffer, '}'); } break; case POSTDECREMENT_EXPR: case POSTINCREMENT_EXPR: if (op_prio (TREE_OPERAND (node, 0)) < op_prio (node)) { pp_character (buffer, '('); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, ')'); } else dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, op_symbol (node)); break; case MIN_EXPR: pp_string (buffer, "MIN_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_character (buffer, '>'); break; case MAX_EXPR: pp_string (buffer, "MAX_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_character (buffer, '>'); break; case ABS_EXPR: pp_string (buffer, "ABS_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, '>'); break; case RANGE_EXPR: NIY; break; case ADDR_SPACE_CONVERT_EXPR: case 
FIXED_CONVERT_EXPR: case FIX_TRUNC_EXPR: case FLOAT_EXPR: CASE_CONVERT: type = TREE_TYPE (node); op0 = TREE_OPERAND (node, 0); if (type != TREE_TYPE (op0)) { pp_character (buffer, '('); dump_generic_node (buffer, type, spc, flags, false); pp_string (buffer, ") "); } if (op_prio (op0) < op_prio (node)) pp_character (buffer, '('); dump_generic_node (buffer, op0, spc, flags, false); if (op_prio (op0) < op_prio (node)) pp_character (buffer, ')'); break; case VIEW_CONVERT_EXPR: pp_string (buffer, "VIEW_CONVERT_EXPR<"); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); pp_string (buffer, ">("); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, ')'); break; case PAREN_EXPR: pp_string (buffer, "(("); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, "))"); break; case NON_LVALUE_EXPR: pp_string (buffer, "NON_LVALUE_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, '>'); break; case SAVE_EXPR: pp_string (buffer, "SAVE_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, '>'); break; case COMPLEX_EXPR: pp_string (buffer, "COMPLEX_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ">"); break; case CONJ_EXPR: pp_string (buffer, "CONJ_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">"); break; case REALPART_EXPR: pp_string (buffer, "REALPART_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">"); break; case IMAGPART_EXPR: pp_string (buffer, "IMAGPART_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">"); break; case VA_ARG_EXPR: pp_string (buffer, "VA_ARG_EXPR <"); dump_generic_node 
(buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">"); break; case TRY_FINALLY_EXPR: case TRY_CATCH_EXPR: pp_string (buffer, "try"); newline_and_indent (buffer, spc+2); pp_string (buffer, "{"); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_string (buffer, "}"); newline_and_indent (buffer, spc); pp_string (buffer, (TREE_CODE (node) == TRY_CATCH_EXPR) ? "catch" : "finally"); newline_and_indent (buffer, spc+2); pp_string (buffer, "{"); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_string (buffer, "}"); is_expr = false; break; case CATCH_EXPR: pp_string (buffer, "catch ("); dump_generic_node (buffer, CATCH_TYPES (node), spc+2, flags, false); pp_string (buffer, ")"); newline_and_indent (buffer, spc+2); pp_string (buffer, "{"); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, CATCH_BODY (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_string (buffer, "}"); is_expr = false; break; case EH_FILTER_EXPR: pp_string (buffer, "<<<eh_filter ("); dump_generic_node (buffer, EH_FILTER_TYPES (node), spc+2, flags, false); pp_string (buffer, ")>>>"); newline_and_indent (buffer, spc+2); pp_string (buffer, "{"); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, EH_FILTER_FAILURE (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_string (buffer, "}"); is_expr = false; break; case LABEL_EXPR: op0 = TREE_OPERAND (node, 0); /* If this is for break or continue, don't bother printing it. 
*/ if (DECL_NAME (op0)) { const char *name = IDENTIFIER_POINTER (DECL_NAME (op0)); if (strcmp (name, "break") == 0 || strcmp (name, "continue") == 0) break; } dump_generic_node (buffer, op0, spc, flags, false); pp_character (buffer, ':'); if (DECL_NONLOCAL (op0)) pp_string (buffer, " [non-local]"); break; case LOOP_EXPR: pp_string (buffer, "while (1)"); if (!(flags & TDF_SLIM)) { newline_and_indent (buffer, spc+2); pp_character (buffer, '{'); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, LOOP_EXPR_BODY (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_character (buffer, '}'); } is_expr = false; break; case PREDICT_EXPR: pp_string (buffer, "// predicted "); if (PREDICT_EXPR_OUTCOME (node)) pp_string (buffer, "likely by "); else pp_string (buffer, "unlikely by "); pp_string (buffer, predictor_name (PREDICT_EXPR_PREDICTOR (node))); pp_string (buffer, " predictor."); break; case RETURN_EXPR: pp_string (buffer, "return"); op0 = TREE_OPERAND (node, 0); if (op0) { pp_space (buffer); if (TREE_CODE (op0) == MODIFY_EXPR) dump_generic_node (buffer, TREE_OPERAND (op0, 1), spc, flags, false); else dump_generic_node (buffer, op0, spc, flags, false); } break; case EXIT_EXPR: pp_string (buffer, "if ("); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ") break"); break; case SWITCH_EXPR: pp_string (buffer, "switch ("); dump_generic_node (buffer, SWITCH_COND (node), spc, flags, false); pp_character (buffer, ')'); if (!(flags & TDF_SLIM)) { newline_and_indent (buffer, spc+2); pp_character (buffer, '{'); if (SWITCH_BODY (node)) { newline_and_indent (buffer, spc+4); dump_generic_node (buffer, SWITCH_BODY (node), spc+4, flags, true); } else { tree vec = SWITCH_LABELS (node); size_t i, n = TREE_VEC_LENGTH (vec); for (i = 0; i < n; ++i) { tree elt = TREE_VEC_ELT (vec, i); newline_and_indent (buffer, spc+4); if (elt) { dump_generic_node (buffer, elt, spc+4, flags, false); pp_string (buffer, " goto "); 
dump_generic_node (buffer, CASE_LABEL (elt), spc+4, flags, true); pp_semicolon (buffer); } else pp_string (buffer, "case ???: goto ???;"); } } newline_and_indent (buffer, spc+2); pp_character (buffer, '}'); } is_expr = false; break; case GOTO_EXPR: op0 = GOTO_DESTINATION (node); if (TREE_CODE (op0) != SSA_NAME && DECL_P (op0) && DECL_NAME (op0)) { const char *name = IDENTIFIER_POINTER (DECL_NAME (op0)); if (strcmp (name, "break") == 0 || strcmp (name, "continue") == 0) { pp_string (buffer, name); break; } } pp_string (buffer, "goto "); dump_generic_node (buffer, op0, spc, flags, false); break; case ASM_EXPR: pp_string (buffer, "__asm__"); if (ASM_VOLATILE_P (node)) pp_string (buffer, " __volatile__"); pp_character (buffer, '('); dump_generic_node (buffer, ASM_STRING (node), spc, flags, false); pp_character (buffer, ':'); dump_generic_node (buffer, ASM_OUTPUTS (node), spc, flags, false); pp_character (buffer, ':'); dump_generic_node (buffer, ASM_INPUTS (node), spc, flags, false); if (ASM_CLOBBERS (node)) { pp_character (buffer, ':'); dump_generic_node (buffer, ASM_CLOBBERS (node), spc, flags, false); } pp_string (buffer, ")"); break; case CASE_LABEL_EXPR: if (CASE_LOW (node) && CASE_HIGH (node)) { pp_string (buffer, "case "); dump_generic_node (buffer, CASE_LOW (node), spc, flags, false); pp_string (buffer, " ... 
"); dump_generic_node (buffer, CASE_HIGH (node), spc, flags, false); } else if (CASE_LOW (node)) { pp_string (buffer, "case "); dump_generic_node (buffer, CASE_LOW (node), spc, flags, false); } else pp_string (buffer, "default"); pp_character (buffer, ':'); break; case OBJ_TYPE_REF: pp_string (buffer, "OBJ_TYPE_REF("); dump_generic_node (buffer, OBJ_TYPE_REF_EXPR (node), spc, flags, false); pp_character (buffer, ';'); dump_generic_node (buffer, OBJ_TYPE_REF_OBJECT (node), spc, flags, false); pp_character (buffer, '-'); pp_character (buffer, '>'); dump_generic_node (buffer, OBJ_TYPE_REF_TOKEN (node), spc, flags, false); pp_character (buffer, ')'); break; case SSA_NAME: dump_generic_node (buffer, SSA_NAME_VAR (node), spc, flags, false); pp_string (buffer, "_"); pp_decimal_int (buffer, SSA_NAME_VERSION (node)); if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (node)) pp_string (buffer, "(ab)"); else if (SSA_NAME_IS_DEFAULT_DEF (node)) pp_string (buffer, "(D)"); break; case WITH_SIZE_EXPR: pp_string (buffer, "WITH_SIZE_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ">"); break; case ASSERT_EXPR: pp_string (buffer, "ASSERT_EXPR <"); dump_generic_node (buffer, ASSERT_EXPR_VAR (node), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, ASSERT_EXPR_COND (node), spc, flags, false); pp_string (buffer, ">"); break; case SCEV_KNOWN: pp_string (buffer, "scev_known"); break; case SCEV_NOT_KNOWN: pp_string (buffer, "scev_not_known"); break; case POLYNOMIAL_CHREC: pp_string (buffer, "{"); dump_generic_node (buffer, CHREC_LEFT (node), spc, flags, false); pp_string (buffer, ", +, "); dump_generic_node (buffer, CHREC_RIGHT (node), spc, flags, false); pp_string (buffer, "}_"); dump_generic_node (buffer, CHREC_VAR (node), spc, flags, false); is_stmt = false; break; case REALIGN_LOAD_EXPR: pp_string (buffer, "REALIGN_LOAD <"); 
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); pp_string (buffer, ">"); break; case VEC_COND_EXPR: pp_string (buffer, " VEC_COND_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " , "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " , "); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); pp_string (buffer, " > "); break; case DOT_PROD_EXPR: pp_string (buffer, " DOT_PROD_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); pp_string (buffer, " > "); break; case OMP_PARALLEL: pp_string (buffer, "#pragma omp parallel"); dump_omp_clauses (buffer, OMP_PARALLEL_CLAUSES (node), spc, flags); dump_omp_body: if (!(flags & TDF_SLIM) && OMP_BODY (node)) { newline_and_indent (buffer, spc + 2); pp_character (buffer, '{'); newline_and_indent (buffer, spc + 4); dump_generic_node (buffer, OMP_BODY (node), spc + 4, flags, false); newline_and_indent (buffer, spc + 2); pp_character (buffer, '}'); } is_expr = false; break; case OMP_TASK: pp_string (buffer, "#pragma omp task"); dump_omp_clauses (buffer, OMP_TASK_CLAUSES (node), spc, flags); goto dump_omp_body; case OMP_FOR: pp_string (buffer, "#pragma omp for"); dump_omp_clauses (buffer, OMP_FOR_CLAUSES (node), spc, flags); if (!(flags & TDF_SLIM)) { int i; if (OMP_FOR_PRE_BODY (node)) { newline_and_indent (buffer, spc + 2); pp_character (buffer, '{'); spc += 4; newline_and_indent (buffer, spc); dump_generic_node (buffer, OMP_FOR_PRE_BODY (node), spc, flags, false); } spc -= 2; for (i = 
0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (node)); i++) { spc += 2; newline_and_indent (buffer, spc); pp_string (buffer, "for ("); dump_generic_node (buffer, TREE_VEC_ELT (OMP_FOR_INIT (node), i), spc, flags, false); pp_string (buffer, "; "); dump_generic_node (buffer, TREE_VEC_ELT (OMP_FOR_COND (node), i), spc, flags, false); pp_string (buffer, "; "); dump_generic_node (buffer, TREE_VEC_ELT (OMP_FOR_INCR (node), i), spc, flags, false); pp_string (buffer, ")"); } if (OMP_FOR_BODY (node)) { newline_and_indent (buffer, spc + 2); pp_character (buffer, '{'); newline_and_indent (buffer, spc + 4); dump_generic_node (buffer, OMP_FOR_BODY (node), spc + 4, flags, false); newline_and_indent (buffer, spc + 2); pp_character (buffer, '}'); } spc -= 2 * TREE_VEC_LENGTH (OMP_FOR_INIT (node)) - 2; if (OMP_FOR_PRE_BODY (node)) { spc -= 4; newline_and_indent (buffer, spc + 2); pp_character (buffer, '}'); } } is_expr = false; break; case OMP_SECTIONS: pp_string (buffer, "#pragma omp sections"); dump_omp_clauses (buffer, OMP_SECTIONS_CLAUSES (node), spc, flags); goto dump_omp_body; case OMP_SECTION: pp_string (buffer, "#pragma omp section"); goto dump_omp_body; case OMP_MASTER: pp_string (buffer, "#pragma omp master"); goto dump_omp_body; case OMP_ORDERED: pp_string (buffer, "#pragma omp ordered"); goto dump_omp_body; case OMP_CRITICAL: pp_string (buffer, "#pragma omp critical"); if (OMP_CRITICAL_NAME (node)) { pp_space (buffer); pp_character (buffer, '('); dump_generic_node (buffer, OMP_CRITICAL_NAME (node), spc, flags, false); pp_character (buffer, ')'); } goto dump_omp_body; case OMP_ATOMIC: pp_string (buffer, "#pragma omp atomic"); newline_and_indent (buffer, spc + 2); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_space (buffer); pp_character (buffer, '='); pp_space (buffer); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); break; case OMP_SINGLE: pp_string (buffer, "#pragma omp single"); dump_omp_clauses (buffer, OMP_SINGLE_CLAUSES 
(node), spc, flags); goto dump_omp_body; case OMP_CLAUSE: dump_omp_clause (buffer, node, spc, flags); is_expr = false; break; case REDUC_MAX_EXPR: pp_string (buffer, " REDUC_MAX_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case REDUC_MIN_EXPR: pp_string (buffer, " REDUC_MIN_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case REDUC_PLUS_EXPR: pp_string (buffer, " REDUC_PLUS_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case VEC_WIDEN_MULT_HI_EXPR: pp_string (buffer, " VEC_WIDEN_MULT_HI_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_WIDEN_MULT_LO_EXPR: pp_string (buffer, " VEC_WIDEN_MULT_LO_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_UNPACK_HI_EXPR: pp_string (buffer, " VEC_UNPACK_HI_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case VEC_UNPACK_LO_EXPR: pp_string (buffer, " VEC_UNPACK_LO_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case VEC_UNPACK_FLOAT_HI_EXPR: pp_string (buffer, " VEC_UNPACK_FLOAT_HI_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case VEC_UNPACK_FLOAT_LO_EXPR: pp_string (buffer, " VEC_UNPACK_FLOAT_LO_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case VEC_PACK_TRUNC_EXPR: pp_string (buffer, " VEC_PACK_TRUNC_EXPR < "); 
dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_PACK_SAT_EXPR: pp_string (buffer, " VEC_PACK_SAT_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_PACK_FIX_TRUNC_EXPR: pp_string (buffer, " VEC_PACK_FIX_TRUNC_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case BLOCK: dump_block_node (buffer, node, spc, flags); break; case VEC_EXTRACT_EVEN_EXPR: pp_string (buffer, " VEC_EXTRACT_EVEN_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_EXTRACT_ODD_EXPR: pp_string (buffer, " VEC_EXTRACT_ODD_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_INTERLEAVE_HIGH_EXPR: pp_string (buffer, " VEC_INTERLEAVE_HIGH_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_INTERLEAVE_LOW_EXPR: pp_string (buffer, " VEC_INTERLEAVE_LOW_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; default: NIY; } if (is_stmt && is_expr) pp_semicolon 
(buffer);

  /* If we're building a diagnostic, the formatted text will be written
     into BUFFER's stream by the caller; otherwise, write it now.  */
  if (!(flags & TDF_DIAGNOSTIC))
    pp_write_text_to_stream (buffer);

  return spc;
}

/* Print the declaration of the variable (or type) T into BUFFER,
   indented by SPC columns and honoring the TDF_* dump FLAGS.  Emits
   storage-class keywords, the type and name (with dedicated layouts for
   array and function declarators), an optional hard-register __asm__
   name, the initializer, a DECL_VALUE_EXPR annotation, and a trailing
   ';'.  */

void
print_declaration (pretty_printer *buffer, tree t, int spc, int flags)
{
  INDENT (spc);

  if (TREE_CODE (t) == TYPE_DECL)
    pp_string (buffer, "typedef ");

  if (CODE_CONTAINS_STRUCT (TREE_CODE (t), TS_DECL_WRTL) && DECL_REGISTER (t))
    pp_string (buffer, "register ");

  if (TREE_PUBLIC (t) && DECL_EXTERNAL (t))
    pp_string (buffer, "extern ");
  else if (TREE_STATIC (t))
    pp_string (buffer, "static ");

  /* Print the type and name.  */
  if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE)
    {
      tree tmp;

      /* Print array's type: strip every level of ARRAY_TYPE to reach the
	 element type.  */
      tmp = TREE_TYPE (t);
      while (TREE_CODE (TREE_TYPE (tmp)) == ARRAY_TYPE)
	tmp = TREE_TYPE (tmp);
      dump_generic_node (buffer, TREE_TYPE (tmp), spc, flags, false);

      /* Print variable's name.  */
      pp_space (buffer);
      dump_generic_node (buffer, t, spc, flags, false);

      /* Print the dimensions.  */
      tmp = TREE_TYPE (t);
      while (TREE_CODE (tmp) == ARRAY_TYPE)
	{
	  dump_array_domain (buffer, TYPE_DOMAIN (tmp), spc, flags);
	  tmp = TREE_TYPE (tmp);
	}
    }
  else if (TREE_CODE (t) == FUNCTION_DECL)
    {
      dump_generic_node (buffer, TREE_TYPE (TREE_TYPE (t)), spc, flags, false);
      pp_space (buffer);
      dump_decl_name (buffer, t, flags);
      dump_function_declaration (buffer, TREE_TYPE (t), spc, flags);
    }
  else
    {
      /* Print type declaration.  */
      dump_generic_node (buffer, TREE_TYPE (t), spc, flags, false);

      /* Print variable's name.  */
      pp_space (buffer);
      dump_generic_node (buffer, t, spc, flags, false);
    }

  if (TREE_CODE (t) == VAR_DECL && DECL_HARD_REGISTER (t))
    {
      pp_string (buffer, " __asm__ ");
      pp_character (buffer, '(');
      dump_generic_node (buffer, DECL_ASSEMBLER_NAME (t), spc, flags, false);
      pp_character (buffer, ')');
    }

  /* The initial value of a function serves to determine whether the function
     is declared or defined.  So the following does not apply to function
     nodes.  */
  if (TREE_CODE (t) != FUNCTION_DECL)
    {
      /* Print the initial value.  */
      if (DECL_INITIAL (t))
	{
	  pp_space (buffer);
	  pp_character (buffer, '=');
	  pp_space (buffer);
	  dump_generic_node (buffer, DECL_INITIAL (t), spc, flags, false);
	}
    }

  if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
    {
      pp_string (buffer, " [value-expr: ");
      dump_generic_node (buffer, DECL_VALUE_EXPR (t), spc, flags, false);
      pp_character (buffer, ']');
    }

  pp_character (buffer, ';');
}

/* Prints a structure: name, fields, and methods.
   FIXME: Still incomplete.  */

static void
print_struct_decl (pretty_printer *buffer, const_tree node, int spc, int flags)
{
  /* Print the name of the structure.  */
  if (TYPE_NAME (node))
    {
      INDENT (spc);
      if (TREE_CODE (node) == RECORD_TYPE)
	pp_string (buffer, "struct ");
      else if ((TREE_CODE (node) == UNION_TYPE
		|| TREE_CODE (node) == QUAL_UNION_TYPE))
	pp_string (buffer, "union ");

      dump_generic_node (buffer, TYPE_NAME (node), spc, 0, false);
    }

  /* Print the contents of the structure.  */
  pp_newline (buffer);
  INDENT (spc);
  pp_character (buffer, '{');
  pp_newline (buffer);

  /* Print the fields of the structure.  */
  {
    tree tmp;
    tmp = TYPE_FIELDS (node);
    while (tmp)
      {
	/* Avoid to print recursively the structure.  */
	/* FIXME : Not implemented correctly..., what about the case when we
	   have a cycle in the contain graph? ...  Maybe this could be solved
	   by looking at the scope in which the structure was declared.  */
	if (TREE_TYPE (tmp) != node
	    && (TREE_CODE (TREE_TYPE (tmp)) != POINTER_TYPE
		|| TREE_TYPE (TREE_TYPE (tmp)) != node))
	  {
	    print_declaration (buffer, tmp, spc+2, flags);
	    pp_newline (buffer);
	  }
	tmp = TREE_CHAIN (tmp);
      }
  }
  INDENT (spc);
  pp_character (buffer, '}');
}

/* Return the priority of the operator CODE.

   From lowest to highest precedence with either left-to-right (L-R)
   or right-to-left (R-L) associativity]:

       1	[L-R]	,
       2	[R-L]	= += -= *= /= %= &= ^= |= <<= >>=
       3	[R-L]	?:
       4	[L-R]	||
       5	[L-R]	&&
       6	[L-R]	|
       7	[L-R]	^
       8	[L-R]	&
       9	[L-R]	== !=
      10	[L-R]	< <= > >=
      11	[L-R]	<< >>
      12	[L-R]	+ -
      13	[L-R]	* / %
      14	[R-L]	! ~ ++ -- + - * & (type) sizeof
      15	[L-R]	fn() [] -> .

   unary +, - and * have higher precedence than the corresponding binary
   operators.  */

int
op_code_prio (enum tree_code code)
{
  switch (code)
    {
    case TREE_LIST:
    case COMPOUND_EXPR:
    case BIND_EXPR:
      return 1;

    case MODIFY_EXPR:
    case INIT_EXPR:
      return 2;

    case COND_EXPR:
      return 3;

    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
      return 4;

    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
      return 5;

    case BIT_IOR_EXPR:
      return 6;

    case BIT_XOR_EXPR:
    case TRUTH_XOR_EXPR:
      return 7;

    case BIT_AND_EXPR:
      return 8;

    case EQ_EXPR:
    case NE_EXPR:
      return 9;

    case UNLT_EXPR:
    case UNLE_EXPR:
    case UNGT_EXPR:
    case UNGE_EXPR:
    case UNEQ_EXPR:
    case LTGT_EXPR:
    case ORDERED_EXPR:
    case UNORDERED_EXPR:
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
      return 10;

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
      return 11;

    case WIDEN_SUM_EXPR:
    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
    case MINUS_EXPR:
      return 12;

    case VEC_WIDEN_MULT_HI_EXPR:
    case VEC_WIDEN_MULT_LO_EXPR:
    case WIDEN_MULT_EXPR:
    case DOT_PROD_EXPR:
    case MULT_EXPR:
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case RDIV_EXPR:
    case EXACT_DIV_EXPR:
    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
      return 13;

    case TRUTH_NOT_EXPR:
    case BIT_NOT_EXPR:
    case POSTINCREMENT_EXPR:
    case POSTDECREMENT_EXPR:
    case PREINCREMENT_EXPR:
    case PREDECREMENT_EXPR:
    case NEGATE_EXPR:
    case ALIGN_INDIRECT_REF:
    case MISALIGNED_INDIRECT_REF:
    case INDIRECT_REF:
    case ADDR_EXPR:
    case FLOAT_EXPR:
    CASE_CONVERT:
    case FIX_TRUNC_EXPR:
    case TARGET_EXPR:
      return 14;

    case CALL_EXPR:
    case ARRAY_REF:
    case ARRAY_RANGE_REF:
    case COMPONENT_REF:
      return 15;

      /* Special expressions.  */
    case MIN_EXPR:
    case MAX_EXPR:
    case ABS_EXPR:
    case REALPART_EXPR:
    case IMAGPART_EXPR:
    case REDUC_MAX_EXPR:
    case REDUC_MIN_EXPR:
    case REDUC_PLUS_EXPR:
    case VEC_LSHIFT_EXPR:
    case VEC_RSHIFT_EXPR:
    case VEC_UNPACK_HI_EXPR:
    case VEC_UNPACK_LO_EXPR:
    case VEC_UNPACK_FLOAT_HI_EXPR:
    case VEC_UNPACK_FLOAT_LO_EXPR:
    case VEC_PACK_TRUNC_EXPR:
    case VEC_PACK_SAT_EXPR:
      return 16;

    default:
      /* Return an arbitrarily high precedence to avoid surrounding single
	 VAR_DECLs in ()s.  */
      return 9999;
    }
}

/* Return the priority of the operator OP.  SAVE_EXPR and NON_LVALUE_EXPR
   wrappers are transparent: the priority of the wrapped operand is used.  */

int
op_prio (const_tree op)
{
  enum tree_code code;

  if (op == NULL)
    return 9999;

  code = TREE_CODE (op);
  if (code == SAVE_EXPR || code == NON_LVALUE_EXPR)
    return op_prio (TREE_OPERAND (op, 0));

  return op_code_prio (code);
}

/* Return the symbol associated with operator CODE.  */

const char *
op_symbol_code (enum tree_code code)
{
  switch (code)
    {
    case MODIFY_EXPR:
      return "=";

    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
      return "||";

    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
      return "&&";

    case BIT_IOR_EXPR:
      return "|";

    case TRUTH_XOR_EXPR:
    case BIT_XOR_EXPR:
      return "^";

    case ADDR_EXPR:
    case BIT_AND_EXPR:
      return "&";

    case ORDERED_EXPR:
      return "ord";
    case UNORDERED_EXPR:
      return "unord";

    case EQ_EXPR:
      return "==";
    case UNEQ_EXPR:
      return "u==";

    case NE_EXPR:
      return "!=";

    case LT_EXPR:
      return "<";
    case UNLT_EXPR:
      return "u<";

    case LE_EXPR:
      return "<=";
    case UNLE_EXPR:
      return "u<=";

    case GT_EXPR:
      return ">";
    case UNGT_EXPR:
      return "u>";

    case GE_EXPR:
      return ">=";
    case UNGE_EXPR:
      return "u>=";

    case LTGT_EXPR:
      return "<>";

    case LSHIFT_EXPR:
      return "<<";

    case RSHIFT_EXPR:
      return ">>";

    case LROTATE_EXPR:
      return "r<<";

    case RROTATE_EXPR:
      return "r>>";

    case VEC_LSHIFT_EXPR:
      return "v<<";

    case VEC_RSHIFT_EXPR:
      return "v>>";

    case POINTER_PLUS_EXPR:
      return "+";

    case PLUS_EXPR:
      return "+";

    case REDUC_PLUS_EXPR:
      return "r+";

    case WIDEN_SUM_EXPR:
      return "w+";

    case WIDEN_MULT_EXPR:
      return "w*";

    case NEGATE_EXPR:
    case MINUS_EXPR:
      return "-";

    case BIT_NOT_EXPR:
      return "~";

    case TRUTH_NOT_EXPR:
      return "!";

    case MULT_EXPR:
    case INDIRECT_REF:
      return "*";

    case ALIGN_INDIRECT_REF:
      return "A*";

    case MISALIGNED_INDIRECT_REF:
      return "M*";

    case TRUNC_DIV_EXPR:
    case RDIV_EXPR:
      return "/";

    case CEIL_DIV_EXPR:
      return "/[cl]";

    case FLOOR_DIV_EXPR:
      return "/[fl]";

    case ROUND_DIV_EXPR:
      return "/[rd]";

    case EXACT_DIV_EXPR:
      return "/[ex]";

    case TRUNC_MOD_EXPR:
      return "%";

    case CEIL_MOD_EXPR:
      return "%[cl]";

    case FLOOR_MOD_EXPR:
      return "%[fl]";

    case ROUND_MOD_EXPR:
      return "%[rd]";

    case PREDECREMENT_EXPR:
      return " --";

    case PREINCREMENT_EXPR:
      return " ++";

    case POSTDECREMENT_EXPR:
      return "-- ";

    case POSTINCREMENT_EXPR:
      return "++ ";

    case MAX_EXPR:
      return "max";

    case MIN_EXPR:
      return "min";

    default:
      return "<<< ??? >>>";
    }
}

/* Return the symbol associated with operator OP.  */

static const char *
op_symbol (const_tree op)
{
  return op_symbol_code (TREE_CODE (op));
}

/* Prints the name of a call.  NODE is the CALL_EXPR_FN of a CALL_EXPR or
   the gimple_call_fn of a GIMPLE_CALL.  ADDR_EXPR/INDIRECT_REF/NOP_EXPR
   wrappers are peeled off iteratively via the `again' label.  */

void
print_call_name (pretty_printer *buffer, tree node, int flags)
{
  tree op0 = node;

  if (TREE_CODE (op0) == NON_LVALUE_EXPR)
    op0 = TREE_OPERAND (op0, 0);

 again:
  switch (TREE_CODE (op0))
    {
    case VAR_DECL:
    case PARM_DECL:
    case FUNCTION_DECL:
      dump_function_name (buffer, op0, flags);
      break;

    case ADDR_EXPR:
    case INDIRECT_REF:
    case NOP_EXPR:
      op0 = TREE_OPERAND (op0, 0);
      goto again;

    case COND_EXPR:
      pp_string (buffer, "(");
      dump_generic_node (buffer, TREE_OPERAND (op0, 0), 0, flags, false);
      pp_string (buffer, ") ? ");
      dump_generic_node (buffer, TREE_OPERAND (op0, 1), 0, flags, false);
      pp_string (buffer, " : ");
      dump_generic_node (buffer, TREE_OPERAND (op0, 2), 0, flags, false);
      break;

    case ARRAY_REF:
      if (TREE_CODE (TREE_OPERAND (op0, 0)) == VAR_DECL)
	dump_function_name (buffer, TREE_OPERAND (op0, 0), flags);
      else
	dump_generic_node (buffer, op0, 0, flags, false);
      break;

    case COMPONENT_REF:
    case SSA_NAME:
    case OBJ_TYPE_REF:
      dump_generic_node (buffer, op0, 0, flags, false);
      break;

    default:
      NIY;
    }
}

/* Parses the string STR and replaces new-lines by '\n', tabs by '\t', ...
   i.e. emits each character of STR into BUFFER with C escape sequences
   spelled out as two-character text.  */

static void
pretty_print_string (pretty_printer *buffer, const char *str)
{
  if (str == NULL)
    return;

  while (*str)
    {
      switch (str[0])
	{
	case '\b':
	  pp_string (buffer, "\\b");
	  break;

	case '\f':
	  pp_string (buffer, "\\f");
	  break;

	case '\n':
	  pp_string (buffer, "\\n");
	  break;

	case '\r':
	  pp_string (buffer, "\\r");
	  break;

	case '\t':
	  pp_string (buffer, "\\t");
	  break;

	case '\v':
	  pp_string (buffer, "\\v");
	  break;

	case '\\':
	  pp_string (buffer, "\\\\");
	  break;

	case '\"':
	  pp_string (buffer, "\\\"");
	  break;

	case '\'':
	  pp_string (buffer, "\\'");
	  break;

	  /* No need to handle \0; the loop terminates on \0.  */

	case '\1':
	  pp_string (buffer, "\\1");
	  break;

	case '\2':
	  pp_string (buffer, "\\2");
	  break;

	case '\3':
	  pp_string (buffer, "\\3");
	  break;

	case '\4':
	  pp_string (buffer, "\\4");
	  break;

	case '\5':
	  pp_string (buffer, "\\5");
	  break;

	case '\6':
	  pp_string (buffer, "\\6");
	  break;

	case '\7':
	  pp_string (buffer, "\\7");
	  break;

	default:
	  pp_character (buffer, str[0]);
	  break;
	}
      str++;
    }
}

/* Lazily construct the file-level pretty-printer state (`buffer' and
   `initialized', declared elsewhere in this file) and redirect its
   output stream to FILE on every call.  */

static void
maybe_init_pretty_print (FILE *file)
{
  if (!initialized)
    {
      pp_construct (&buffer, /* prefix */NULL, /* line-width */0);
      pp_needs_newline (&buffer) = true;
      pp_translate_identifiers (&buffer) = false;
      initialized = 1;
    }

  buffer.buffer->stream = file;
}

/* Emit a newline into BUFFER, then indent the next line by SPC columns.  */

static void
newline_and_indent (pretty_printer *buffer, int spc)
{
  pp_newline (buffer);
  INDENT (spc);
}
3d7pt_var.lbpar.c
#include <omp.h>
#include <math.h>
/* Integer ceiling/floor division helpers used by the tiled (CLooG) loop
   bounds in main() below.  */
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 7 point stencil with variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 *
 * NOTE(review): this is the classic glibc manual example; it normalizes
 * (and therefore MODIFIES) *y as a side effect, so callers must not rely
 * on *y afterwards.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

/* Benchmark driver.  Command line: argv[1..3] = interior grid points per
 * dimension (a ghost layer is added on each side, hence the +2), argv[4] =
 * number of time steps.  Runs the PLUTO-tiled 7-point stencil TESTS times
 * and reports the per-run and minimum wall-clock time.
 *
 * NOTE(review): Nx/Ny/Nz (when argc <= 3) and Nt (when argc <= 4) are used
 * uninitialized if the corresponding arguments are missing — confirm the
 * harness always passes all four arguments.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays
  // A[2][Nz][Ny][Nx]: double-buffered field (time step t%2 / (t+1)%2).
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // coef[7][Nz][Ny][Nx]: one variable coefficient per stencil point.
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  // NOTE(review): tile_size is never freed (minor leak in a benchmark).
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 4;
  tile_size[3] = 1024;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  //
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2

    /* The following license/feature-test comments were inlined by the
       source-to-source tool from the glibc preinclude header.  */
    /* Copyright (C) 1991-2014 Free Software Foundation, Inc.
       This file is part of the GNU C Library.

       The GNU C Library is free software; you can redistribute it and/or
       modify it under the terms of the GNU Lesser General Public
       License as published by the Free Software Foundation; either
       version 2.1 of the License, or (at your option) any later version.

       The GNU C Library is distributed in the hope that it will be useful,
       but WITHOUT ANY WARRANTY; without even the implied warranty of
       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
       Lesser General Public License for more details.

       You should have received a copy of the GNU Lesser General Public
       License along with the GNU C Library; if not, see
       <http://www.gnu.org/licenses/>.  */
    /* This header is separate from features.h so that the compiler can
       include it implicitly at the start of every compilation.  It must not
       itself include <features.h> or any other header that includes
       <features.h> because the implicit include comes before any feature
       test macros that may be defined in a source file before it first
       explicitly includes a system header.  GCC knows the name of this
       header in order to preinclude it.  */
    /* glibc's intent is to support the IEC 559 math functionality, real and
       complex.  If the GCC (4.9 and later) predefined macros specifying
       compiler intent are available, use them to determine whether the
       overall intent is to support these features; otherwise, presume an
       older compiler has intent to support these features and define these
       macros by default.  */
    /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
       Unicode 6.0.  */
    /* We do not support C11 <threads.h>.  */

    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;

    /* Start of CLooG code */
    /* Auto-generated time-tiled loop nest (tile sizes 4x4x4x1024); t5 is
       the time step, (t6,t7,t8) index z/y/x shifted by t5.  Do not edit
       the bounds by hand.  */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,2);t1++) {
        lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
        ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(t1-1,2)),ceild(4*t2-Nz,4));t3<=min(min(min(floord(4*t2+Ny,4),floord(Nt+Ny-4,4)),floord(2*t1+Ny+1,4)),floord(4*t1-4*t2+Nz+Ny-1,4));t3++) {
            for (t4=max(max(max(0,ceild(t1-511,512)),ceild(4*t2-Nz-1020,1024)),ceild(4*t3-Ny-1020,1024));t4<=min(min(min(min(floord(4*t2+Nx,1024),floord(4*t3+Nx,1024)),floord(Nt+Nx-4,1024)),floord(2*t1+Nx+1,1024)),floord(4*t1-4*t2+Nz+Nx-1,1024));t4++) {
              for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),4*t3-Ny+2),1024*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),4*t3+2),1024*t4+1022),4*t1-4*t2+Nz+1);t5++) {
                for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) {
                    lbv=max(1024*t4,t5+1);
                    ubv=min(1024*t4+1023,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      /* 7-point stencil: center plus the six face
                         neighbors, each scaled by its own coefficient.  */
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  // NOTE(review): the top-level pointers A and coef themselves are not
  // freed; harmless right before exit.
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }

  return 0;
}
opencl_gpg_fmt_plug.c
/*
 * Modified by Dhiru Kholia <dhiru at openwall.com> for GPG format.
 *
 * This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net>
 * and it is hereby released to the general public under the following terms:
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * Converted to use 'common' code, Feb29-Mar1 2016, JimF.
 */
#ifdef HAVE_OPENCL

/* Standard John-the-Ripper plugin registration dance: declare the format
   for the externs pass, register it for the registration pass, and only
   compile the implementation otherwise.  */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_gpg;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_gpg);
#else

#include <stdint.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif

#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "misc.h"
#include "common-opencl.h"
#include "options.h"
#include "gpg_common.h"

#define FORMAT_LABEL "gpg-opencl"
#define FORMAT_NAME "OpenPGP / GnuPG Secret Key"
#define ALGORITHM_NAME "SHA1/SHA2 OpenCL"
/* The salt stored per hash is just a pointer to the shared
   gpg_common_custom_salt structure.  */
#define SALT_SIZE sizeof(struct gpg_common_custom_salt*)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1

/* One candidate password as transferred to the OpenCL device. */
typedef struct {
	uint32_t length;
	uint8_t v[PLAINTEXT_LENGTH];
} gpg_password;

/* 32 bytes of derived output per candidate, read back from the device. */
typedef struct {
	uint8_t v[32];
} gpg_hash;

/* S2K parameters (salt, iteration count, requested key length) shared by
   all candidates of one crypt pass. */
typedef struct {
	uint32_t length;
	uint32_t count;
	uint32_t key_len;
	uint8_t salt[SALT_LENGTH];
} gpg_salt;

struct fmt_tests gpg_tests[] = {
	// from GPU
	/* SHA1-CAST5 salt-iter */
{"$gpg$*1*667*2048*387de4c9e2c1018aed84af75922ecaa92d1bc68d48042144c77dfe168de1fd654e4db77bfbc60ec68f283483382413cbfddddcfad714922b2d558f8729f705fbf973ab1839e756c26207a4bc8796eeb567bf9817f73a2a81728d3e4bc0894f62ad96e04e60752d84ebc01316703b0fd0f618f6120289373347027924606712610c583b25be57c8a130bc4dd796964f3f03188baa057d6b8b1fd36675af94d45847eeefe7fff63b755a32e8abe26b7f3f58bb091e5c7b9250afe2180b3d0abdd2c1db3d4fffe25e17d5b7d5b79367d98c523a6c280aafef5c1975a42fd97242ba86ced73c5e1a9bcab82adadd11ef2b64c3aad23bc930e62fc8def6b1d362e954795d87fa789e5bc2807bfdc69bba7e66065e3e3c2df0c25eab0fde39fbe54f32b26f07d88f8b05202e55874a1fa37d540a5af541e28370f27fe094ca8758cd7ff7b28df1cbc475713d7604b1af22fd758ebb3a83876ed83f003285bc8fdc7a5470f7c5a9e8a93929941692a9ff9f1bc146dcc02aab47e2679297d894f28b62da16c8baa95cd393d838fa63efc9d3f88de93dc970c67022d5dc88dce25decec8848f8e6f263d7c2c0238d36aa0013d7edefd43dac1299a54eb460d9b82cb53cf86fcb7c8d5dba95795a1adeb729a705b47b8317594ac3906424b2c0e425343eca019e53d927e6bc32688bd9e87ee808fb1d8eeee8ab938855131b839776c7da79a33a6d66e57eadb430ef04809009794e32a03a7e030b8792be5d53ceaf480ffd98633d1993c43f536a90bdbec8b9a827d0e0a49155450389beb53af5c214c4ec09712d83b175671358d8e9d54da7a8187f72aaaca5203372841af9b89a07b8aadecafc0f2901b8aec13a5382c6f94712d629333b301afdf52bdfa62534de2b10078cd4d0e781c88efdfe4e5252e39a236af449d4d62081cee630ab*3*254*2*3*8*b1fdf3772bb57e1f*65536*2127ccd55e721ba0", "polished"}, /* SHA1-CAST5 salt-iter */ 
{"$gpg$*1*668*2048*e5f3ef815854f90dfdc3ad61c9c92e512a53d7203b8a5665a8b00ac5ed92340a6ed74855b976fc451588cc5d51776b71657830f2c311859022a25412ee6746622febff8184824454c15a50d64c18b097af28d3939f5c5aa9589060f25923b8f7247e5a2130fb8241b8cc07a33f70391de7f54d84703d2537b4d1c307bdf824c6be24c6e36501e1754cc552551174ed51a2f958d17c6a5bd3b4f75d7979537ee1d5dcd974876afb93f2bcda7468a589d8dba9b36afbe019c9086d257f3f047e3ff896e52783f13219989307bf277e04a5d949113fc4efcc747334f307a448b949ee61b1db326892a9198789f9253994a0412dd704d9e083000b63fa07096d9d547e3235f7577ecd49199c9c3edfa3e43f65d6c506363d23c21561707f790e17ea25b7a7fce863b3c952218a3ac649002143c9b02df5c47ed033b9a1462d515580b10ac79ebdca61babb020400115f1e9fad26318a32294034ea4cbaf681c7b1de12c4ddb99dd4e39e6c8f13a322826dda4bb0ad22981b17f9e0c4d50d7203e205fb2ee6ded117a87e47b58f58f442635837f2debc6fcfbaebba09cff8b2e855d48d9b96c9a9fb020f66c97dffe53bba316ef756c797557f2334331eecaedf1ab331747dc0af6e9e1e4c8e2ef9ed2889a5facf72f1c43a24a6591b2ef5128ee872d299d32f8c0f1edf2bcc35f453ce27c534862ba2c9f60b65b641e5487f5be53783d79e8c1e5f62fe336d8854a8121946ea14c49e26ff2b2db36cef81390da7b7a8d31f7e131dccc32e6828a32b13f7a56a28d0a28afa8705adbf60cb195b602dd8161d8b6d8feff12b16eb1ac463eaa6ae0fd9c2d906d43d36543ef33659a04cf4e69e99b8455d666139e8860879d7e933e6c5d995dd13e6aaa492b21325f23cbadb1bc0884093ac43651829a6fe5fe4c138aff867eac253569d0dc6*3*254*2*3*8*e318a03635a19291*65536*06af8a67764f5674", "blingbling"}, /* SHA1-CAST5 salt-iter */ 
{"$gpg$*1*668*2048*8487ca407790457c30467936e109d968bdff7fa4f4d87b0af92e2384627ca546f2898e5f77c00300db87a3388476e2de74f058b8743c2d59ada316bc81c79fdd31e403e46390e3e614f81187fb0ae4ca26ed53a0822ace48026aa8a8f0abdf17d17d72dfa1eba7a763bbd72f1a1a8c020d02d7189bd95b12368155697f5e4e013f7c81f671ca320e72b61def43d3e2cb3d23d105b19fe161f2789a3c81363639b4258c855c5acd1dd6596c46593b2bfec23d319b58d4514196b2e41980fbb05f376a098049f3258f9cdf1628c6ff780963e2c8dc26728d33c6733fbac6e415bd16d924a087269e8351dd1c6129d1ac7925f19d7c9a9ed3b08a53e207ffbfba1d43891da68e39749775b38cbe9e6831def4b4297ce7446d09944583367f58205a4f986d5a84c8cf3871a7e2b6c4e2c94ff1df51cd94aecf7a76cd6991a785c66c78f686e6c47add9e27a6b00a2e709f1383f131e3b83b05c812b2ec76e732d713b780c381b0785f136cd00de7afa0276c95c5f0bb3a4b6ad484d56e390c11f9d975729ae1665189190fd131f49109f899735fd2c2efbafd8b971b196d18aeff70decc9768381f0b2243a05db99bd5911d5b94770ee315e1fe3ab0e090aa460d2c8d06a06fef254fd5fa8967386f1f5d37ea6f667215965eefe3fc6bc131f2883c02925a2a4f05dabc48f05867e68bb68741b6fb3193b7c51b7d053f6fd45108e496b9f8f2810fa75ffe454209e2249f06cc1bfc838a97436ebd64001b9619513bcb519132ce39435ed0d7c84ec0c6013e786eef5f9e23738debc70a68a389040e8caad6bd5bb486e43395e570f8780d3f1d837d2dc2657bbded89f76b06c28c5a58ecaa25a225d3d4513ee8dc8655907905590737b971035f690ac145b2d4322ecc86831f36b39d1490064b2aa27b23084a3a0b029e49a52b6a608219*3*254*2*3*8*0409f810febe5e05*65536*ce0e64511258eecc", "njokuani."}, /* SHA1-CAST5 salt-iter */ 
{"$gpg$*1*348*1024*e5fbff62d94b41de7fc9f3dd93685aa6e03a2c0fcd75282b25892c74922ec66c7327933087304d34d1f5c0acca5659b704b34a67b0d8dedcb53a10aee14c2615527696705d3ab826d53af457b346206c96ef4980847d02129677c5e21045abe1a57be8c0bf7495b2040d7db0169c70f59994bba4c9a13451d38b14bd13d8fe190cdc693ee207d8adfd8f51023b7502c7c8df5a3c46275acad6314d4d528df37896f7b9e53adf641fe444e18674d59cf46d5a6dffdc2f05e077346bf42fe35937e95f644a58a2370012d993c5008e6d6ff0c66c6d0d0b2f1c22961b6d12563a117897675f6b317bc71e4f2dbf6b9fff23186da2724a584d70401136e8c500784df462ea6548db4eecc782e79afe52fd8c1106c7841c085b8d44465d7a1910161d6c707a377a72f85c39fcb4ee58e6b2f617b6c4b173a52f171854f0e1927fa9fcd9d5799e16d840f06234698cfc333f0ad42129e618c2b9c5b29b17b7*3*254*2*3*8*7353cf09958435f9*9961472*efadea6cd5f3e5a7", "openwall"}, /* SHA1-CAST5 salt-iter */ {"$gpg$*1*668*2048*97b296b60904f6d505344b5b0aa277b0f40de05788a39cd9c39b14a56b607bd5db65e8da6111149a1725d06a4b52bdddf0e467e26fe13f72aa5570a0ea591eec2e24d3e9dd7534f26ec9198c8056ea1c03a88161fec88afd43474d31bf89756860c2bc6a6bc9e2a4a2fc6fef30f8cd2f74da6c301ccd5863f3240d1a2db7cbaa2df3a8efe0950f6200cbc10556393583a6ebb2e041095fc62ae3a9e4a0c5c830d73faa72aa8167b7b714ab85d927382d77bbfffb3f7c8184711e81cf9ec2ca03906e151750181500238f7814d2242721b2307baa9ea66e39b10a4fdad30ee6bff50d79ceac604618e74469ae3c80e7711c16fc85233a9eac39941a564b38513c1591502cde7cbd47a4d02a5d7d5ceceb7ff920ee40c29383bd7779be1e00b60354dd86ca514aa30e8f1523efcffdac1292198fe96983cb989a259a4aa475ed9b4ce34ae2282b3ba0169b2e82f9dee476eff215db33632cdcc72a65ba2e68d8e3f1fed90aaa68c4c886927b733144fb7225f1208cd6a108e675cc0cb11393db7451d883abb6adc58699393b8b7b7e19c8584b6fc95720ced39eabaa1124f423cc70f38385c4e9c4b4eeb39e73e891da01299c0e6ce1e97e1750a5c615e28f486c6a0e4da52c15285e7cf26ac859f5f4190e2804ad81ba4f8403e6358fbf1d48c7d593c3bac20a403010926877db3b9d7d0aaacd713a2b9833aff88d1e6b4d228532a66fe68449ad0d706ca7563fe8c2ec77062cc33244a515f2023701c052f0dd172b7914d497fdaefabd91a199d6cb2b62c71472f52c65d6a67d97d7713d39e91f347d2bc73b421fb
5c6c6ba028555e5a92a535aabf7a4234d6ea8a315d8e6dcc82087cc76ec8a7b2366cecf176647538968e804541b79a1b602156970d1b943eb2641f2b123e45d7cace9f2dc84b704938fa8c7579a859ef87eca46*3*254*2*3*8*d911a3f73b050340*2097152*347e15bee29eb77d", "password"}, /* SHA1-CAST5 salt-iter, DSA key */ {"$gpg$*17*42*1024*d974ae70cfbf8ab058b2e1d898add67ab1272535e8c4b9c5bd671adce22d08d5db941a60e0715b4f0c9d*3*254*2*3*8*a9e85673bb9199d8*11534336*71e35b85cddfe2af", "crackme"}, /* gpg --gen-key --s2k-digest-algo SHA256 --s2k-cipher-algo AES */ {"$gpg$*1*668*2048*92f639f5a56692a0fb3bd32ca5d91099b49d4cf283da7d272ed51bdf337a4960e361eeb302d418c3f9620d94a077bcf888b56f892d87e2f330ecab3934ebc080ac440b4bb7cd1f79565f0a8b7331c2302d725451fbeff51ff2f25e69708555edfb353dfcab9ce33f6071ccaa2d32ad93a73082be621a8ec43a66f984551607d1e366892386e2f3cc0bdf6447216d0fbc8402c86d54cf0fd8fc133c4899a5a4b1b36cedfb5b11e804856885a7230def7718684f99f995df24f985706f0c1311d15d9a043b6a0096f5e0bb751c61a07517372441887de0532b35d5e4f9d5b35b2119715ca51a4a59227a3031fbd24f25d06ae8b6d17c1b5998aba281733cc6260930916c0d4fb84bf0cf4e7112b07bf5d78a97716599be4bed78d741757ea7149db2d1c9ff35d3b69f80dd7152ed99642b695c88c0f075ffd8a360f30a3e6160d2c5b99e41418f47ac6f9615c1a4d73b0f05c8d11d8ea18b9ea6bf9e6d2a7642f253b7ee742389a9dc19bb81261061b578609b73ad314e6e5c6afe68640abc62f5009e659fa64790689f7befe5009e396cc63d79493e56371a080c0c94c8f0036dbe9ac5a8861befc5882168f7866ec225641a2cf91d8318fcf660699d1e0272b4e0df7751c84e48513a5d26c27a12bf7f9e6965321a97f0b8162f4861fea9c78ee4bc3110b2d412f38081781f0aba5a43b92af148c4e3d9affa1f6b3a42cfcf7c7275b95445777ae51ed200bdb30606432ff05d132232ee9e8a92eba811b96422ba3390f3dbe23f8d6c5ed5cbee361f980e58394c0a8d0f9e9e1186dbb5defcf5bf3c9b44f55598a0b119b71a8bd8edf6428555e36e76785954997f40409beeea578740fb77334c4a396bfac3a24f8628212737ff6d7ffa3802e7bacd06e3e81344eebd1e60a72efa5f45e09151f55d838fda78007190c040851e5f67*3*254*8*7*16*1d1d7a3090537117d6d18e3b8dc41433*65536*d5285754134a9a05", "12345678"}, /* gpg --gen-key --s2k-digest-algo SHA256 
--s2k-cipher-algo CAMELLIA128 */ {"$gpg$*1*668*2048*cce4298ada379aa74719ec266478e8711d7aa880ac552a15577ecb35c5d2f48a78d2b2fa1b015764c747b4632a7fe53675f2e117c64c0c4312e182c45ddcf988ed402de11ee93294e465070e052d198313bb822e88f31bcb1e3206271d8a5833d035effdce53648167663790e502574d1c5cf51fad8ae968bb155b75b22306f65cc37b27e0d6ba9b8e39b567c4853b41b21b9556b21a96f7f20477784118614b821e47d80ebc168d8b763e2bddfc37b7c55af838c9cff3be0e18da6da8f3671ab3c990fe541aedbb2ea8b43060f8cba4651baa4b8c498740064b95511c1e55d2125f99d636aec077ea0a606c1e9d9c919f0ed7f54a877907b45c544e534a843d8fded7334e66b74acc0a67b7ad6ffc317e93215e63ad083515d2394841ba52476096537cf0c436016031698d1497c7983e37fcd8ce4f184b6daa31cb5a2d7631355fc561bf681e309f6474163278ba8fd25e3dcc28342cc3b5c288d3cc95bc1c0746cc082b78f43cf3161d9c6551d56fbf23d83a8e10ae9380f754a2c0b74b93359d1b16213bb81625f301493ba6b347a1e5fb79745f7c8e317814e0e861f4fdb85f988f48ead7012f8e13a58fa07e33761efe64cb39b4bcf1f19d1f8b14f5bfc46c7703922273582bd99c266492247b2281c2565c03fe5270f0e858036ea4c994d4afd2029cc184a877189817dce9b5da2c8f89ea8914a0cc29dc4786aef6638e1983467ff574d2a4cc704bef7a7070c3b2bbb2f23e7c0fd8cf00365decae26a2d8ab45093587b3f8c3224bf7b8dd6c4a43853ef5c9c6eb6df0f2a77b126f55b49f77de5dc382a8327ed6fa24f379a4e9d1296cb0a9066b902f510aca6560f9e50bdd9663a269cdba41dd212dac569845c13226f2cd5311527705b24d698cb0acfb44b8a60bb4d3113ef2cb2cc7d597a889612c7f73aca5f8fd70a7*3*254*8*11*16*65a45645f3abe401f3345713d8eadfdf*65536*48e94f48bcda5a55", "abc"}, /* gpg --gen-key --s2k-digest-algo SHA256 --s2k-cipher-algo AES256 */ 
{"$gpg$*1*668*2048*4cb57f4b39dc6fc9137f99c9f4799e7a7b3dfa40fe6890e18b7b32c866aa8c63aa35ee1b3d2b5a223120a145fd066d082674552c6c89169c6f9a06efb238ba69c7d8b7826501bdbf6b92dfd7c97f5b9388a2afa6a8f985dbc8c962c56ed92a9f6dca3566e98647df5d31fec885608623e830fcf3346177a0e572dfe23610ae90c323bbb4cc54d857b7ea7642477c490a2fc875f3f7cc7889367f7ba3161df2a6c48218a06468146deeb66fc2d754420b3a967f418696eec725ad7d3093dc17924a2770949dd68f8efa79ddfdccbc7c23091fa7342a72b02f8288a14e7b9c51653a7d4f6044456b72a46033e3eb1855708c3bd310e10fb0f460ac362008d08526cb255e8a3efea5f6741a314b71d5fb811e42d1b3be79e546fcd52bc4d18ce3dcbe6c0b1816c25047bc8d81cbf21b57ba6bb12ab363fb17dd51176a5231e15b2740a66aff37d5b74547fc2af2448e6e83cf2ecbc7f897724e3d802becabdcf9ff2b2d977e45ff170899b1c3714a293b783ef758152c3072ad20a8b36b661c0af40c24e277dcefb3a869cce9a1e7f3afbd0abdbcbf87c309d2cb3fe36bd0069dd60da6651dc6e557d486953ef98699bee86b82baaa412f41c5952b3bec9ab43329f895a76dfd3e0e46bcd10277b1f57dfe43375a330c5c6e953c890c9e075f24fc1a9bdc38ea2ecaf0a4bc58026a545eacc317aee3eeebb39725b3ea6e1171ad600576b36e3d592909b73a4a3841c97a38db51f2579cd93d23560b9486e6a2d4d0a966efb31225c79d3214ed9da5b31b235b26f98a2b2f62f01684cf959056e978fd4ede44f4feaa35a8d411010a0a6df89a5d41eef39d64edea9c6dd79aa3ce9fdb4b41e88389776aafaedb3372e26633f13a63c4a62d2546e9b0c1e0d542991a2f8e9d76a630a20707d42073374308a409fe2a05b1476de07bb25679*3*254*8*9*16*ccdac5fce9ae3ec503390424a918aedb*65536*7dfbd9389fd9de2c", "openwall"}, /* SHA256-AES256 salt-iter */ 
{"$gpg$*1*348*1024*8f58917c41a894a4a3cdc138161c111312e404a1a27bb19f3234656c805ca9374bbfce59750688c6d84ba2387a4cd48330f504812cf074eba9c4da11d057d0a2662e3c7d9969e1256e40a56cce925fba29f4975ddb8619004def3502a0e7cf2ec818695c158243f21da34440eea1fec20418c7cf7dbe2230279ba9858201c00ae1d412aea1f59a66538fb739a15297ff9de39860e2782bf60a3979a5577a448925a0bc2d24c6bf3d09500046487a60bf5945e1a37b73a93eebb15cfd08c5279a942e150affbbe0d3084ab8aef2b6d9f16dc37b84334d91b80cf6f7b6c2e82d3c2be42afd39827dac16b4581be2d2f01d9703f2b19c16e149414fdfdcf8794aa90804e4b1dac8725bd0b0be52513848973eeadd5ec06d8a1da8ac072800fcc9c579172b58d39db5bc00bc0d7a21cf85fb6c7ce10f64edde425074cb9d1b4b078790aed2a46e79dc7fa4b8f3751111a2ff06f083a20c7d73d6bbc747e0*3*254*8*9*16*5b68d216aa46f2c1ed0f01234ebb6e06*131072*6c18b4661b884405", "openwall"}, /* gpg --gen-key --s2k-digest-algo SHA512 --s2k-cipher-algo AES */ {"$gpg$*1*668*2048*1de86a75cca20667506b71e729cf77e10ec148a948a94199910506e783eba52bf074f5d1d1f4819adbe28c7b51b464069ba3e44fceb62eef3038d3dfe8f7bc6012c9abc35769439730a8aabe99e4603fd2201303e82b413617d8fbaf95fdaee3d16d38a74df86a814f487e78b5c84093187529ebc54232a945628205b2eaf13ffeb41f94b1482a73f3aeb97f297d2398d94be2782a1f24244430cf251553dce8571c99ccbd6fe46e6863b25fe132420d1f49acdf9bf413c2155a794b5cf45cea8bc4d958fee20b5523cc42343a106fca60068f93aedd6d4f6021bee5a22b70969c1c8369a615de3f46867bc9364d0cdde141672c102ae42cb338c21d0ec6dd4eec923345201b3b3f97e94b7f60defb2a733616cdcd50c4254689441ab25d3ffe8adb56ef6654f35b446f05a56eef24a4bcdd52cc2b4590667f56d31c6182a757ad0ca1d1377cb04ac3a0711b25cb978ce51f19b5affe648153fa96ee3204b4043478ea20903aa7ff7f2f71cfcff802de73d709776d2dcf611d2936366c7a42edd7ab12ce4cf354eef5c27118ee89f3bb6f9de37b8e64e6db3071ea0b6de83ed27568e25672b56eacad2fee9a8872ea17b6a5fef7e14c3fece236842d2cef0c2044dbdcb2a3b317f64aaad1703844e0ebe1a5e0a90f137b62735d65dc51cf0357abe7ffd25d41d0e23fa9fc03b1be7b73f6fb8a9db04aed18cec473b0c93ffd981cc54cfd779e116c46ee621f3aa4b2e16a8ab8017a234cf26ac77f433e4544bd5761c8b263ae1b8023f6d1aca
73bae1d2da5bbf7824406f5e2ff976fbf6e4b9484f020e9346648435d341de2e06a9e607059f847c5007a078dec2f08f466fc219ea5c4762d678af9b4737466e6af46edab495305a4d30124cc30d67fd3d38787cf763e362fe4484722e22b5f584e7cf64ec2c05390252a23583da9ca*3*254*10*7*16*5dfa8dd3acc0c05f3b1666f0e9243ef9*65536*75807ba0c8e4917f", "12345678"}, /* gpg --gen-key --s2k-digest-algo SHA512 --s2k-cipher-algo AES */ {"$gpg$*1*668*2048*fc59c117c35e67a90ee4fabc451a0a61d719d9f913cd796a3d1cc5dd788a9df62bff604ca19a3ee23ea068e3d0747d1897a5ceee21646799f4734ec4a2d02574255f6eace9674e368c2b4588b8892541ab53907795e25b9afd849d9b1d99f3e90b2b3520caa4262e318b63d3796339878752aaeb9ca636c57a5a9fc12ba621954acead99129d6e87d80674bdce027cd8e7e9865f1ca8ea66f41e298807447f89df5f9a701b42f9f153f43ee16d4e0e2ec7688ab68640553bd5db14c6d9469346e510ea31554537aca0a2108a353be41e1af12a62b78463576d5978d104f22e2b39296181c0a67e5d96f60ad5e1e2693ed37e1d20ed97712c0af5e774d30bf244bd6392a24cd2afdd1b44d856c5363006ccaad5fbd8a9b0afee03c1c326718a97b141297133267cbd128c45e753a6eff6d903e6c937322f72e62f1abe04d0c344eecc3e49b512bb1fe819b8a231502a3f1182bcc0387b0ad65342b97722330c2f271e5e9e21da40b59fd92af047dc4840f40e2c3f8b1fb8acb8cd33ac32e8d3d37eb60d682b45a2ff14623416330f978d90a07f1ec377ccb7ef8288d5ca8cfe31d486dfb748e53b42bb99d3eb674e5462bcb9ff3a8e1b2542780356073f75bb5dd110ac9670d89362ec6f29f988600da58b2d3d446f279e402b09ef4f3160ce5cd0e13861f735c40b7d0bc2b6447ce27b9aaf5c0358745e6e1f108eb1321fd0f4eb8cd5065ebf6bef9b7e097fb217eba65cc26c59e6553c2badfae570cc709cff0b32b398be68b19b4597e9889fc1163cc8e7a77a09cf3dcc63cbaee12c8be34a7eee47edc71bc11b91a939a7ca2dc5d305a1edddcc172f309873a2c8cbcb9caf8e11710e681b310f12678edd211fb3d0bb93c606253c5096c189e3be5cbc28633647e3d3b8ca14af6c76ce450b9258c241ef41d87f46cc33e790a1de*3*254*10*7*16*19424e6ddf44d9af244edc31e7090900*65536*fa31f69128e5fe9c", "abcdef"}, {NULL} }; static int *cracked; static int any_cracked; static cl_int cl_error; static gpg_password *inbuffer; static gpg_hash *outbuffer; static gpg_salt currentsalt; static cl_mem 
mem_in, mem_out, mem_setting;  /* device buffers (tail of the "static cl_mem" declaration started above) */
static struct fmt_main *self;
/* one kernel per S2K digest; crypt_kernel (SHA-1) is declared by the shared OpenCL code */
static cl_kernel crypt_kernel_sha256, crypt_kernel_sha512;
size_t insize, outsize, settingsize, cracked_size;

#define STEP 0
#define SEED 256

// This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl-autotune.h"
#include "memdbg.h"

static const char *warn[] = {
	"xfer: ", ", crypt: ", ", xfer: "
};

/* ------- Helper functions ------- */

/* Upper bound on the work-group size, queried from the autotune library. */
static size_t get_task_max_work_group_size()
{
	return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
}

/*
 * Allocate host staging buffers and device buffers for 'gws' candidate
 * keys, and bind the buffers as arguments of all three S2K kernels.
 * Called by the autotuner (and again after re-tuning) with varying gws.
 */
static void create_clobj(size_t gws, struct fmt_main *self)
{
	insize = sizeof(gpg_password) * gws;
	outsize = sizeof(gpg_hash) * gws;
	settingsize = sizeof(gpg_salt);
	cracked_size = sizeof(*cracked) * gws;

	inbuffer = mem_calloc(1, insize);
	outbuffer = mem_alloc(outsize);
	cracked = mem_calloc(1, cracked_size);

	// Allocate memory
	mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem in");
	mem_setting = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize, NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem setting");
	mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem out");

	/* All three kernels share the same in/out/salt buffers. */
	// SHA-1 S2K
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting), &mem_setting), "Error while setting mem_salt kernel argument");
	// SHA-256 S2K
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel_sha256, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel_sha256, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel_sha256, 2, sizeof(mem_setting), &mem_setting), "Error while setting mem_salt kernel argument");
	// SHA-512 S2K
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel_sha512, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel_sha512, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel_sha512, 2, sizeof(mem_setting), &mem_setting), "Error while setting mem_salt kernel argument");
}

/*
 * Release the device buffers and host staging buffers created by
 * create_clobj().  'cracked' doubles as the "allocated" flag so the
 * function is safe to call when nothing was allocated.
 */
static void release_clobj(void)
{
	if (cracked) {
		HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
		HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
		HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");

		MEM_FREE(inbuffer);
		MEM_FREE(outbuffer);
		MEM_FREE(cracked);
	}
}

/* Format init callback: remember the fmt_main and prepare the device. */
static void init(struct fmt_main *_self)
{
	self = _self;
	opencl_prepare_dev(gpu_id);
}

/*
 * Build the kernel program and create the three S2K kernels, then run
 * the autotuner.  Guarded by 'autotuned' so the expensive build/tune
 * only happens on the first reset.
 */
static void reset(struct db_main *db)
{
	if (!autotuned) {
		char build_opts[64];

		snprintf(build_opts, sizeof(build_opts),
		         "-DPLAINTEXT_LENGTH=%d -DSALT_LENGTH=%d",
		         PLAINTEXT_LENGTH, SALT_LENGTH);
		opencl_init("$JOHN/kernels/gpg_kernel.cl", gpu_id, build_opts);

		crypt_kernel = clCreateKernel(program[gpu_id], "gpg", &cl_error);
		HANDLE_CLERROR(cl_error, "Error creating kernel");

		crypt_kernel_sha256 = clCreateKernel(program[gpu_id], "gpg_sha256", &cl_error);
		HANDLE_CLERROR(cl_error, "Error creating kernel");

		crypt_kernel_sha512 = clCreateKernel(program[gpu_id], "gpg_sha512", &cl_error);
		HANDLE_CLERROR(cl_error, "Error creating kernel");

		// Initialize openCL tuning (library) for this format.
		opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self,
		                       create_clobj, release_clobj,
		                       sizeof(gpg_password), 0, db);

		// Auto tune execution from shared/included code.
		autotune_run(self, 1, 0, 300);
	}
}

/* Teardown: free buffers, kernels and the program (only if we tuned). */
static void done(void)
{
	if (autotuned) {
		release_clobj();

		HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
		HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");

		autotuned--;
	}
}

/* Ciphertext validation is shared with the CPU gpg format. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	return gpg_common_valid(ciphertext, self, 0);
}

/*
 * Copy the salt, iteration count and derived-key length into the
 * host-side 'currentsalt' and upload it to the device (non-blocking;
 * the queue serializes it before the next kernel launch).
 */
static void set_salt(void *salt)
{
	gpg_common_cur_salt = *(struct gpg_common_custom_salt **)salt;
	currentsalt.length = SALT_LENGTH;
	memcpy((char*)currentsalt.salt, gpg_common_cur_salt->salt, currentsalt.length);
	currentsalt.count = gpg_common_cur_salt->count;
	currentsalt.key_len = gpg_common_keySize(gpg_common_cur_salt->cipher_algorithm);

	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
		CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL),
	    "Copy setting to gpu");
}

#undef set_key
/* Stage one candidate key into the host input buffer (truncating to
 * PLAINTEXT_LENGTH); uploaded in bulk by crypt_all(). */
static void set_key(char *key, int index)
{
	uint32_t length = strlen(key);

	if (length > PLAINTEXT_LENGTH)
		length = PLAINTEXT_LENGTH;
	inbuffer[index].length = length;
	memcpy(inbuffer[index].v, key, length);
}

/* Read a staged key back out of the host input buffer.
 * NOTE: returns a pointer to a static buffer (not thread-safe). */
static char *get_key(int index)
{
	static char ret[PLAINTEXT_LENGTH + 1];
	uint32_t length = inbuffer[index].length;

	memcpy(ret, inbuffer[index].v, length);
	ret[length] = '\0';

	return ret;
}

/*
 * Upload the staged keys, run the kernel matching the salt's S2K hash
 * algorithm, read the derived keys back (blocking), then verify each
 * candidate on the CPU via gpg_common_check().
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);

	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}

	// Copy data to gpu
	BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
		insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
	    "Copy data to gpu");

	// Run kernel
	if (gpg_common_cur_salt->hash_algorithm == HASH_SHA1) {
		BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
			NULL, &global_work_size, lws, 0, NULL,
			multi_profilingEvent[1]), "Run kernel");
	} else if (gpg_common_cur_salt->hash_algorithm == HASH_SHA256) {
		BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel_sha256, 1,
			NULL, &global_work_size, lws, 0, NULL,
			multi_profilingEvent[1]), "Run kernel");
	} else if (gpg_common_cur_salt->hash_algorithm == HASH_SHA512) {
		BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel_sha512, 1,
			NULL, &global_work_size, lws, 0, NULL,
			multi_profilingEvent[1]), "Run kernel");
	}

	// Read the result back (CL_TRUE makes this a blocking read)
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
		outsize, outbuffer, 0, NULL, multi_profilingEvent[2]),
	    "Copy result back");

	if (ocl_autotune_running)
		return count;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
		if (gpg_common_check(outbuffer[index].v, gpg_common_keySize(gpg_common_cur_salt->cipher_algorithm))) {
			cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}

	return count;
}

/* Any hit at all in this batch? (binary is unused for this format) */
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}

/* Did this particular candidate crack the salt? */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

/* gpg_common_check() already did the full verification. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/*
 * Report gpg --s2k-count n as 1st tunable cost,
 * hash algorithm as 2nd tunable cost,
 * cipher algorithm as 3rd tunable cost.
 */
struct fmt_main fmt_opencl_gpg = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_DYNA_SALT | FMT_HUGE_INPUT,
		{
			"s2k-count", /* only for gpg --s2k-mode 3, see man gpg, option --s2k-count n */
			"hash algorithm [2:SHA1 8:SHA256 10:SHA512]",
			"cipher algorithm [1:IDEA 2:3DES 3:CAST5 4:Blowfish 7:AES128 8:AES192 9:AES256 10:Twofish 11:Camellia128 12:Camellia192 13:Camellia256]",
		},
		{ FORMAT_TAG },
		gpg_tests
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		gpg_common_get_salt,
		{
			gpg_common_gpg_s2k_count,
			gpg_common_gpg_hash_algorithm,
			gpg_common_gpg_cipher_algorithm,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
IndexedFaceMesh.h
#ifndef __INDEXEDFACEMESH_H__ #define __INDEXEDFACEMESH_H__ #include <vector> #include "Common/Common.h" #include <iterator> namespace Utilities { class IndexedFaceMesh { public: struct Edge { unsigned int m_face[2]; unsigned int m_vert[2]; }; struct Face { unsigned int *m_edges; }; // Stores the indices of each face connected to a specific vertex struct VertexFaces { VertexFaces() { m_fIndices = 0; m_numFaces = 0; } VertexFaces(VertexFaces const& other) { *this = other; } VertexFaces& operator=(VertexFaces const& other) { m_numFaces = other.m_numFaces; m_fIndices = new unsigned int[m_numFaces]; #if defined(WIN32) || defined(_WIN32) || defined(WIN64) std::copy(other.m_fIndices, other.m_fIndices + m_numFaces, stdext::unchecked_array_iterator<unsigned int*>(m_fIndices)); #else std::copy(other.m_fIndices, other.m_fIndices + m_numFaces, m_fIndices); #endif return *this; } ~VertexFaces() { delete[] m_fIndices; } unsigned int m_numFaces; unsigned int* m_fIndices; }; // Stores the indices of each edge connected to a specific vertex struct VertexEdges { VertexEdges() { m_eIndices = 0; m_numEdges = 0; } VertexEdges(VertexEdges const& other) { *this = other; } VertexEdges& operator=(VertexEdges const& other) { m_numEdges = other.m_numEdges; m_eIndices = new unsigned int[m_numEdges]; #if defined(WIN32) || defined(_WIN32) || defined(WIN64) std::copy(other.m_eIndices, other.m_eIndices + m_numEdges, stdext::unchecked_array_iterator<unsigned int*>(m_eIndices)); #else std::copy(other.m_eIndices, other.m_eIndices + m_numEdges, m_eIndices); #endif return *this; } ~VertexEdges() { delete[] m_eIndices; } unsigned int m_numEdges; unsigned int* m_eIndices; }; public: typedef std::vector<unsigned int> Faces; typedef std::vector<Vector3r, Alloc_Vector3r> FaceNormals; typedef std::vector<Vector3r, Alloc_Vector3r> VertexNormals; typedef std::vector<Face> FaceData; typedef std::vector<Edge> Edges; typedef std::vector<VertexFaces> VerticesFaces; typedef std::vector<VertexEdges> VerticesEdges; 
typedef std::vector<unsigned int> UVIndices; typedef std::vector<Vector2r, Alloc_Vector2r> UVs; protected: unsigned int m_numPoints; Faces m_indices; Edges m_edges; FaceData m_faces; bool m_closed; UVIndices m_uvIndices; UVs m_uvs; VerticesFaces m_verticesFaces; VerticesEdges m_verticesEdges; unsigned int m_verticesPerFace; FaceNormals m_normals; VertexNormals m_vertexNormals; public: IndexedFaceMesh(const unsigned int verticesPerFace = 3); IndexedFaceMesh(IndexedFaceMesh const& other); IndexedFaceMesh& operator=(IndexedFaceMesh const& other); ~IndexedFaceMesh(); void release(); bool isClosed() const; void initMesh(const unsigned int nPoints, const unsigned int nEdges, const unsigned int nFaces); void addFace(const unsigned int * const indices); void addFace(const int * const indices); void addUV(const Real u, const Real v); void addUVIndex(const unsigned int index); const Faces& getFaces() const { return m_indices; } Faces& getFaces(){ return m_indices; } const FaceNormals& getFaceNormals() const { return m_normals; } FaceNormals& getFaceNormals(){ return m_normals; } const VertexNormals& getVertexNormals() const { return m_vertexNormals; } VertexNormals& getVertexNormals(){ return m_vertexNormals; } Edges& getEdges() { return m_edges; } const Edges& getEdges() const { return m_edges; } const FaceData& getFaceData() const { return m_faces; } const UVIndices& getUVIndices() const { return m_uvIndices; } const UVs& getUVs() const { return m_uvs; } const VerticesFaces& getVertexFaces() const { return m_verticesFaces; } const VerticesEdges& getVertexEdges() const { return m_verticesEdges; } unsigned int numVertices() const { return m_numPoints; } unsigned int numFaces() const { return (unsigned int)m_indices.size() / m_verticesPerFace; } unsigned int numEdges() const { return (unsigned int)m_edges.size(); } unsigned int numUVs() const { return (unsigned int)m_uvs.size(); } void copyUVs(const UVIndices& uvIndices, const UVs& uvs); void buildNeighbors(); template<class 
PositionData> void updateNormals(const PositionData &pd, const unsigned int offset); template<class PositionData> void updateVertexNormals(const PositionData &pd); unsigned int getVerticesPerFace() const; }; template<class PositionData> void IndexedFaceMesh::updateNormals(const PositionData &pd, const unsigned int offset) { m_normals.resize(numFaces()); #pragma omp parallel default(shared) { #pragma omp for schedule(static) for (int i = 0; i < (int) numFaces(); i++) { // Get first three points of face const Vector3r &a = pd.getPosition(m_indices[m_verticesPerFace*i] + offset); const Vector3r &b = pd.getPosition(m_indices[m_verticesPerFace*i + 1] + offset); const Vector3r &c = pd.getPosition(m_indices[m_verticesPerFace*i + 2] + offset); // Create normal Vector3r v1 = b - a; Vector3r v2 = c - a; m_normals[i] = v1.cross(v2); m_normals[i].normalize(); } } } template<class PositionData> void IndexedFaceMesh::updateVertexNormals(const PositionData &pd) { m_vertexNormals.resize(numVertices()); for (unsigned int i = 0; i < numVertices(); i++) { m_vertexNormals[i].setZero(); } for (unsigned int i = 0u; i < numFaces(); i++) { const Vector3r &n = m_normals[i]; m_vertexNormals[m_indices[m_verticesPerFace*i]] += n; m_vertexNormals[m_indices[m_verticesPerFace*i + 1]] += n; m_vertexNormals[m_indices[m_verticesPerFace*i + 2]] += n; } for (unsigned int i = 0; i < numVertices(); i++) { m_vertexNormals[i].normalize(); } } } #endif
GB_bitmap_expand_to_hyper.c
//------------------------------------------------------------------------------
// GB_bitmap_expand_to_hyper: expand a compact bitmap C to hypersparse
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Frees C's contents and all partially-built arrays on the error path.
#define GB_FREE_ALL                     \
{                                       \
    GB_phbix_free (C) ;                 \
    GB_FREE (&Cp, Cp_size) ;            \
    GB_FREE (&Ch, Ch_size) ;            \
    GB_FREE (&Ci, Ci_size) ;            \
}

#include "GB_mxm.h"

GrB_Info GB_bitmap_expand_to_hyper
(
    // input/output:
    GrB_Matrix C,
    // input
    int64_t cvlen_final,
    int64_t cvdim_final,
    GrB_Matrix A,
    GrB_Matrix B,
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (C != NULL && (GB_IS_BITMAP (C) || GB_IS_FULL (C))) ;
    ASSERT (A != NULL && B != NULL) ;
    GBURBLE ("(expand bitmap/full to hyper) ") ;
    ASSERT_MATRIX_OK (C, "C to expand from bitmap/full to hyper", GB0) ;
    ASSERT_MATRIX_OK (A, "A for expand C from bitmap/full to hyper", GB0) ;
    ASSERT_MATRIX_OK (B, "B for expand C from bitmap/full to hyper", GB0) ;

    int64_t cvlen = C->vlen ;           // current (compact) dimensions of C
    int64_t cvdim = C->vdim ;
    int64_t cnz = cvlen * cvdim ;       // every entry of the bitmap/full C
    bool A_is_hyper = GB_IS_HYPERSPARSE (A) ;
    bool B_is_hyper = GB_IS_HYPERSPARSE (B) ;

    // C is currently a subset of its final dimension, in bitmap or full form.
    // It is converted back into sparse/hypersparse form, with zombies if
    // bitmap, and expanded in size to be cvlen_final by cvdim_final (A->vdim
    // by B->vdim for C=A'*B, or A->vlen by B->vdim for C=A*B).

    //--------------------------------------------------------------------------
    // allocate the sparse/hypersparse structure of the final C
    //--------------------------------------------------------------------------

    int64_t *restrict Cp = NULL ; size_t Cp_size = 0 ;
    int64_t *restrict Ch = NULL ; size_t Ch_size = 0 ;
    int64_t *restrict Ci = NULL ; size_t Ci_size = 0 ;

    Cp = GB_MALLOC (cvdim+1, int64_t, &Cp_size) ;
    Ch = NULL ;
    if (B_is_hyper)
    {
        // Ch is only needed when the result is hypersparse
        Ch = GB_MALLOC (cvdim, int64_t, &Ch_size) ;
    }
    Ci = GB_MALLOC (cnz, int64_t, &Ci_size) ;
    if (Cp == NULL || (B_is_hyper && Ch == NULL) || Ci == NULL)
    {
        // out of memory
        GB_FREE_ALL ;
        return (GrB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // construct the hyperlist of C, if B is hypersparse
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (cvdim, chunk, nthreads_max) ;

    if (B_is_hyper)
    {
        // C becomes hypersparse; its hyperlist is a copy of B's
        ASSERT (cvdim == B->nvec) ;
        GB_memcpy (Ch, B->h, cvdim * sizeof (int64_t), nthreads) ;
    }

    //--------------------------------------------------------------------------
    // construct the vector pointers of C
    //--------------------------------------------------------------------------

    // every vector of C holds exactly cvlen entries (zombies included)
    int64_t pC ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (pC = 0 ; pC < cvdim+1 ; pC++)
    {
        Cp [pC] = pC * cvlen ;
    }

    //--------------------------------------------------------------------------
    // construct the pattern of C from its bitmap
    //--------------------------------------------------------------------------

    // C(i,j) becomes a zombie if not present in the bitmap

    nthreads = GB_nthreads (cnz, chunk, nthreads_max) ;
    int8_t *restrict Cb = C->b ;
    bool C_is_bitmap = (Cb != NULL) ;

    if (C_is_bitmap)
    {
        // C is bitmap: absent entries become zombies (GB_FLIP'd indices)
        if (A_is_hyper)
        {
            // only for C=A'*B: row indices come from A's hyperlist
            GrB_Index *restrict Ah = (GrB_Index *) A->h ;
            ASSERT (cvlen == A->nvec) ;
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (pC = 0 ; pC < cnz ; pC++)
            {
                int64_t i = Ah [pC % cvlen] ;
                Ci [pC] = (Cb [pC]) ? i : GB_FLIP (i) ;
            }
        }
        else
        {
            // for C=A'*B or C=A*B: row indices are 0..cvlen-1 per vector
            ASSERT (cvlen == cvlen_final) ;
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (pC = 0 ; pC < cnz ; pC++)
            {
                int64_t i = pC % cvlen ;
                Ci [pC] = (Cb [pC]) ? i : GB_FLIP (i) ;
            }
        }
    }
    else
    {
        // C is full: every entry is present, no zombies
        if (A_is_hyper)
        {
            // only for C=A'*B
            GrB_Index *restrict Ah = (GrB_Index *) A->h ;
            ASSERT (cvlen == A->nvec) ;
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (pC = 0 ; pC < cnz ; pC++)
            {
                int64_t i = Ah [pC % cvlen] ;
                Ci [pC] = i ;
            }
        }
        else
        {
            // for C=A'*B or C=A*B
            ASSERT (cvlen == cvlen_final) ;
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (pC = 0 ; pC < cnz ; pC++)
            {
                int64_t i = pC % cvlen ;
                Ci [pC] = i ;
            }
        }
    }

    //--------------------------------------------------------------------------
    // transplant the new content and finalize C
    //--------------------------------------------------------------------------

    C->p = Cp ; Cp = NULL ; C->p_size = Cp_size ;
    C->h = Ch ; Ch = NULL ; C->h_size = Ch_size ;
    C->i = Ci ; Ci = NULL ; C->i_size = Ci_size ;

    // nzombies must be computed from the old nvals BEFORE nvals is
    // overwritten below — do not reorder these assignments.
    C->nzombies = (C_is_bitmap) ? (cnz - C->nvals) : 0 ;
    C->vdim = cvdim_final ;
    C->vlen = cvlen_final ;
    C->nvals = -1 ;     // NOTE(review): presumably marks nvals as not
                        // maintained for sparse/hypersparse C — confirm
                        // against the GrB_Matrix definition
    C->nvec = cvdim ;
    C->plen = cvdim ;
    C->nvec_nonempty = (cvlen == 0) ? 0 : cvdim ;

    // free the bitmap, if present
    GB_FREE ((&C->b), C->b_size) ;

    // C is now sparse or hypersparse
    ASSERT_MATRIX_OK (C, "C expanded from bitmap/full to hyper", GB0) ;
    ASSERT (GB_ZOMBIES_OK (C)) ;
    return (GrB_SUCCESS) ;
}
GB_binop__isne_int32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__isne_int32)
// A.*B function (eWiseMult):       GB (_AemultB_08__isne_int32)
// A.*B function (eWiseMult):       GB (_AemultB_02__isne_int32)
// A.*B function (eWiseMult):       GB (_AemultB_04__isne_int32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__isne_int32)
// A*D function (colscale):         GB (_AxD__isne_int32)
// D*A function (rowscale):         GB (_DxB__isne_int32)
// C+=B function (dense accum):     GB (_Cdense_accumB__isne_int32)
// C+=b function (dense accum):     GB (_Cdense_accumb__isne_int32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__isne_int32)
// C=scalar+B                       GB (_bind1st__isne_int32)
// C=scalar+B'                      GB (_bind1st_tran__isne_int32)
// C=A+scalar                       GB (_bind2nd__isne_int32)
// C=A'+scalar                      GB (_bind2nd_tran__isne_int32)

// C type:     int32_t
// A type:     int32_t
// A pattern?  0
// B type:     int32_t
// B pattern?  0

// BinaryOp:   cij = (aij != bij)

// The macros below parameterize the generic templates included by the
// functions that follow in this file.

#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso)  \
    int32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso)  \
    int32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x != y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISNE || GxB_NO_INT32 || GxB_NO_ISNE_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (dead code: ISNE is not one of those ops, so this variant is compiled out)
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Body supplied by the included template, specialized via the macros above.
void GB (_Cdense_ewise3_noaccum__isne_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__isne_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isne_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isne_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isne_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isne_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read when is_eWiseUnion is true
    int32_t alpha_scalar ;
    int32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__isne_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isne_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__isne_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isne_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isne_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B (NULL if B is full)
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t   x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int32_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x != bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isne_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A (NULL if A is full)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t   y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        int32_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x != aij) ;                      \
}

GrB_Info GB (_bind1st_tran__isne_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij != y) ;                      \
}

GrB_Info GB (_bind2nd_tran__isne_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__ne_fc64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__ne_fc64 // A.*B function (eWiseMult): GB_AemultB__ne_fc64 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__ne_fc64 // C+=b function (dense accum): GB_Cdense_accumb__ne_fc64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__ne_fc64 // C=scalar+B GB_bind1st__ne_fc64 // C=scalar+B' GB_bind1st_tran__ne_fc64 // C=A+scalar GB_bind2nd__ne_fc64 // C=A'+scalar GB_bind2nd_tran__ne_fc64 // C type: bool // A type: GxB_FC64_t // B,b type: GxB_FC64_t // BinaryOp: cij = GB_FC64_ne (aij, bij) #define GB_ATYPE \ GxB_FC64_t #define GB_BTYPE \ GxB_FC64_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ GxB_FC64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = (creal 
(Ax [pA]) != 0) || (cimag (Ax [pA]) != 0) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = (creal (Bx [pB]) != 0) || (cimag (Bx [pB]) != 0) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = GB_FC64_ne (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_NE || GxB_NO_FC64 || GxB_NO_NE_FC64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__ne_fc64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__ne_fc64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__ne_fc64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type GxB_FC64_t GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, 
const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__ne_fc64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__ne_fc64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct 
*GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__ne_fc64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ; GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t bij = Bx [p] ; Cx [p] = GB_FC64_ne (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__ne_fc64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ; GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; Cx [p] = GB_FC64_ne (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define 
GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = Ax [pA] ; \ Cx [pC] = GB_FC64_ne (x, aij) ; \ } GrB_Info GB_bind1st_tran__ne_fc64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = Ax [pA] ; \ Cx [pC] = GB_FC64_ne (aij, y) ; \ } GrB_Info GB_bind2nd_tran__ne_fc64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
Stmt.h
//===- Stmt.h - Classes for representing statements -------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Stmt interface and subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMT_H #define LLVM_CLANG_AST_STMT_H #include "clang/AST/DeclGroup.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/CapturedStmt.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include <algorithm> #include <cassert> #include <cstddef> #include <iterator> #include <string> namespace llvm { class FoldingSetNodeID; } // namespace llvm namespace clang { class ASTContext; class Attr; class CapturedDecl; class Decl; class Expr; class LabelDecl; class ODRHash; class PrinterHelper; struct PrintingPolicy; class RecordDecl; class SourceManager; class StringLiteral; class Token; class VarDecl; //===----------------------------------------------------------------------===// // AST classes for statements. //===----------------------------------------------------------------------===// /// Stmt - This represents one statement. 
/// class alignas(void *) Stmt { public: enum StmtClass { NoStmtClass = 0, #define STMT(CLASS, PARENT) CLASS##Class, #define STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class, #define LAST_STMT_RANGE(BASE, FIRST, LAST) \ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class #define ABSTRACT_STMT(STMT) #include "clang/AST/StmtNodes.inc" }; // Make vanilla 'new' and 'delete' illegal for Stmts. protected: friend class ASTStmtReader; friend class ASTStmtWriter; void *operator new(size_t bytes) noexcept { llvm_unreachable("Stmts cannot be allocated with regular 'new'."); } void operator delete(void *data) noexcept { llvm_unreachable("Stmts cannot be released with regular 'delete'."); } //===--- Statement bitfields classes ---===// class StmtBitfields { friend class Stmt; /// The statement class. unsigned sClass : 8; }; enum { NumStmtBits = 8 }; class NullStmtBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class NullStmt; unsigned : NumStmtBits; /// True if the null statement was preceded by an empty macro, e.g: /// @code /// #define CALL(x) /// CALL(0); /// @endcode unsigned HasLeadingEmptyMacro : 1; /// The location of the semi-colon. SourceLocation SemiLoc; }; class CompoundStmtBitfields { friend class ASTStmtReader; friend class CompoundStmt; unsigned : NumStmtBits; unsigned NumStmts : 32 - NumStmtBits; /// The location of the opening "{". SourceLocation LBraceLoc; }; class LabelStmtBitfields { friend class LabelStmt; unsigned : NumStmtBits; SourceLocation IdentLoc; }; class AttributedStmtBitfields { friend class ASTStmtReader; friend class AttributedStmt; unsigned : NumStmtBits; /// Number of attributes. unsigned NumAttrs : 32 - NumStmtBits; /// The location of the attribute. SourceLocation AttrLoc; }; class IfStmtBitfields { friend class ASTStmtReader; friend class IfStmt; unsigned : NumStmtBits; /// True if this if statement is a constexpr if. 
unsigned IsConstexpr : 1; /// True if this if statement has storage for an else statement. unsigned HasElse : 1; /// True if this if statement has storage for a variable declaration. unsigned HasVar : 1; /// True if this if statement has storage for an init statement. unsigned HasInit : 1; /// The location of the "if". SourceLocation IfLoc; }; class SwitchStmtBitfields { friend class SwitchStmt; unsigned : NumStmtBits; /// True if the SwitchStmt has storage for an init statement. unsigned HasInit : 1; /// True if the SwitchStmt has storage for a condition variable. unsigned HasVar : 1; /// If the SwitchStmt is a switch on an enum value, records whether all /// the enum values were covered by CaseStmts. The coverage information /// value is meant to be a hint for possible clients. unsigned AllEnumCasesCovered : 1; /// The location of the "switch". SourceLocation SwitchLoc; }; class WhileStmtBitfields { friend class ASTStmtReader; friend class WhileStmt; unsigned : NumStmtBits; /// True if the WhileStmt has storage for a condition variable. unsigned HasVar : 1; /// The location of the "while". SourceLocation WhileLoc; }; class DoStmtBitfields { friend class DoStmt; unsigned : NumStmtBits; /// The location of the "do". SourceLocation DoLoc; }; class ForStmtBitfields { friend class ForStmt; unsigned : NumStmtBits; /// The location of the "for". SourceLocation ForLoc; }; class GotoStmtBitfields { friend class GotoStmt; friend class IndirectGotoStmt; unsigned : NumStmtBits; /// The location of the "goto". SourceLocation GotoLoc; }; class ContinueStmtBitfields { friend class ContinueStmt; unsigned : NumStmtBits; /// The location of the "continue". SourceLocation ContinueLoc; }; class BreakStmtBitfields { friend class BreakStmt; unsigned : NumStmtBits; /// The location of the "break". SourceLocation BreakLoc; }; class ReturnStmtBitfields { friend class ReturnStmt; unsigned : NumStmtBits; /// True if this ReturnStmt has storage for an NRVO candidate. 
unsigned HasNRVOCandidate : 1; /// The location of the "return". SourceLocation RetLoc; }; class SwitchCaseBitfields { friend class SwitchCase; friend class CaseStmt; unsigned : NumStmtBits; /// Used by CaseStmt to store whether it is a case statement /// of the form case LHS ... RHS (a GNU extension). unsigned CaseStmtIsGNURange : 1; /// The location of the "case" or "default" keyword. SourceLocation KeywordLoc; }; //===--- Expression bitfields classes ---===// class ExprBitfields { friend class ASTStmtReader; // deserialization friend class AtomicExpr; // ctor friend class BlockDeclRefExpr; // ctor friend class CallExpr; // ctor friend class CXXConstructExpr; // ctor friend class CXXDependentScopeMemberExpr; // ctor friend class CXXNewExpr; // ctor friend class CXXUnresolvedConstructExpr; // ctor friend class DeclRefExpr; // computeDependence friend class DependentScopeDeclRefExpr; // ctor friend class DesignatedInitExpr; // ctor friend class Expr; friend class InitListExpr; // ctor friend class ObjCArrayLiteral; // ctor friend class ObjCDictionaryLiteral; // ctor friend class ObjCMessageExpr; // ctor friend class OffsetOfExpr; // ctor friend class OpaqueValueExpr; // ctor friend class OverloadExpr; // ctor friend class ParenListExpr; // ctor friend class PseudoObjectExpr; // ctor friend class ShuffleVectorExpr; // ctor unsigned : NumStmtBits; unsigned ValueKind : 2; unsigned ObjectKind : 3; unsigned TypeDependent : 1; unsigned ValueDependent : 1; unsigned InstantiationDependent : 1; unsigned ContainsUnexpandedParameterPack : 1; }; enum { NumExprBits = NumStmtBits + 9 }; class PredefinedExprBitfields { friend class ASTStmtReader; friend class PredefinedExpr; unsigned : NumExprBits; /// The kind of this PredefinedExpr. One of the enumeration values /// in PredefinedExpr::IdentKind. unsigned Kind : 4; /// True if this PredefinedExpr has a trailing "StringLiteral *" /// for the predefined identifier. 
unsigned HasFunctionName : 1; /// The location of this PredefinedExpr. SourceLocation Loc; }; class DeclRefExprBitfields { friend class ASTStmtReader; // deserialization friend class DeclRefExpr; unsigned : NumExprBits; unsigned HasQualifier : 1; unsigned HasTemplateKWAndArgsInfo : 1; unsigned HasFoundDecl : 1; unsigned HadMultipleCandidates : 1; unsigned RefersToEnclosingVariableOrCapture : 1; /// The location of the declaration name itself. SourceLocation Loc; }; enum APFloatSemantics { IEEEhalf, IEEEsingle, IEEEdouble, x87DoubleExtended, IEEEquad, PPCDoubleDouble }; class FloatingLiteralBitfields { friend class FloatingLiteral; unsigned : NumExprBits; unsigned Semantics : 3; // Provides semantics for APFloat construction unsigned IsExact : 1; }; class StringLiteralBitfields { friend class ASTStmtReader; friend class StringLiteral; unsigned : NumExprBits; /// The kind of this string literal. /// One of the enumeration values of StringLiteral::StringKind. unsigned Kind : 3; /// The width of a single character in bytes. Only values of 1, 2, /// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps /// the target + string kind to the appropriate CharByteWidth. unsigned CharByteWidth : 3; unsigned IsPascal : 1; /// The number of concatenated token this string is made of. /// This is the number of trailing SourceLocation. unsigned NumConcatenated; }; class CharacterLiteralBitfields { friend class CharacterLiteral; unsigned : NumExprBits; unsigned Kind : 3; }; class UnaryOperatorBitfields { friend class UnaryOperator; unsigned : NumExprBits; unsigned Opc : 5; unsigned CanOverflow : 1; SourceLocation Loc; }; class UnaryExprOrTypeTraitExprBitfields { friend class UnaryExprOrTypeTraitExpr; unsigned : NumExprBits; unsigned Kind : 3; unsigned IsType : 1; // true if operand is a type, false if an expression. 
}; class ArraySubscriptExprBitfields { friend class ArraySubscriptExpr; unsigned : NumExprBits; SourceLocation RBracketLoc; }; class CallExprBitfields { friend class CallExpr; unsigned : NumExprBits; unsigned NumPreArgs : 1; /// True if the callee of the call expression was found using ADL. unsigned UsesADL : 1; /// Padding used to align OffsetToTrailingObjects to a byte multiple. unsigned : 24 - 2 - NumExprBits; /// The offset in bytes from the this pointer to the start of the /// trailing objects belonging to CallExpr. Intentionally byte sized /// for faster access. unsigned OffsetToTrailingObjects : 8; }; enum { NumCallExprBits = 32 }; class MemberExprBitfields { friend class MemberExpr; unsigned : NumExprBits; /// IsArrow - True if this is "X->F", false if this is "X.F". unsigned IsArrow : 1; /// True if this member expression used a nested-name-specifier to /// refer to the member, e.g., "x->Base::f", or found its member via /// a using declaration. When true, a MemberExprNameQualifier /// structure is allocated immediately after the MemberExpr. unsigned HasQualifierOrFoundDecl : 1; /// True if this member expression specified a template keyword /// and/or a template argument list explicitly, e.g., x->f<int>, /// x->template f, x->template f<int>. /// When true, an ASTTemplateKWAndArgsInfo structure and its /// TemplateArguments (if any) are present. unsigned HasTemplateKWAndArgsInfo : 1; /// True if this member expression refers to a method that /// was resolved from an overloaded set having size greater than 1. unsigned HadMultipleCandidates : 1; /// This is the location of the -> or . in the expression. SourceLocation OperatorLoc; }; class CastExprBitfields { friend class CastExpr; friend class ImplicitCastExpr; unsigned : NumExprBits; unsigned Kind : 6; unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr. /// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough /// here. ([implimits] Direct and indirect base classes [16384]). 
unsigned BasePathSize; }; class BinaryOperatorBitfields { friend class BinaryOperator; unsigned : NumExprBits; unsigned Opc : 6; /// This is only meaningful for operations on floating point /// types and 0 otherwise. unsigned FPFeatures : 3; SourceLocation OpLoc; }; class InitListExprBitfields { friend class InitListExpr; unsigned : NumExprBits; /// Whether this initializer list originally had a GNU array-range /// designator in it. This is a temporary marker used by CodeGen. unsigned HadArrayRangeDesignator : 1; }; class ParenListExprBitfields { friend class ASTStmtReader; friend class ParenListExpr; unsigned : NumExprBits; /// The number of expressions in the paren list. unsigned NumExprs; }; class PseudoObjectExprBitfields { friend class ASTStmtReader; // deserialization friend class PseudoObjectExpr; unsigned : NumExprBits; // These don't need to be particularly wide, because they're // strictly limited by the forms of expressions we permit. unsigned NumSubExprs : 8; unsigned ResultIndex : 32 - 8 - NumExprBits; }; //===--- C++ Expression bitfields classes ---===// class CXXOperatorCallExprBitfields { friend class ASTStmtReader; friend class CXXOperatorCallExpr; unsigned : NumCallExprBits; /// The kind of this overloaded operator. One of the enumerator /// value of OverloadedOperatorKind. unsigned OperatorKind : 6; // Only meaningful for floating point types. unsigned FPFeatures : 3; }; class CXXBoolLiteralExprBitfields { friend class CXXBoolLiteralExpr; unsigned : NumExprBits; /// The value of the boolean literal. unsigned Value : 1; /// The location of the boolean literal. SourceLocation Loc; }; class CXXNullPtrLiteralExprBitfields { friend class CXXNullPtrLiteralExpr; unsigned : NumExprBits; /// The location of the null pointer literal. SourceLocation Loc; }; class CXXThisExprBitfields { friend class CXXThisExpr; unsigned : NumExprBits; /// Whether this is an implicit "this". unsigned IsImplicit : 1; /// The location of the "this". 
SourceLocation Loc; }; class CXXThrowExprBitfields { friend class ASTStmtReader; friend class CXXThrowExpr; unsigned : NumExprBits; /// Whether the thrown variable (if any) is in scope. unsigned IsThrownVariableInScope : 1; /// The location of the "throw". SourceLocation ThrowLoc; }; class CXXDefaultArgExprBitfields { friend class ASTStmtReader; friend class CXXDefaultArgExpr; unsigned : NumExprBits; /// The location where the default argument expression was used. SourceLocation Loc; }; class CXXDefaultInitExprBitfields { friend class ASTStmtReader; friend class CXXDefaultInitExpr; unsigned : NumExprBits; /// The location where the default initializer expression was used. SourceLocation Loc; }; class CXXScalarValueInitExprBitfields { friend class ASTStmtReader; friend class CXXScalarValueInitExpr; unsigned : NumExprBits; SourceLocation RParenLoc; }; class CXXNewExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class CXXNewExpr; unsigned : NumExprBits; /// Was the usage ::new, i.e. is the global new to be used? unsigned IsGlobalNew : 1; /// Do we allocate an array? If so, the first trailing "Stmt *" is the /// size expression. unsigned IsArray : 1; /// Should the alignment be passed to the allocation function? unsigned ShouldPassAlignment : 1; /// If this is an array allocation, does the usual deallocation /// function for the allocated type want to know the allocated size? unsigned UsualArrayDeleteWantsSize : 1; /// What kind of initializer do we have? Could be none, parens, or braces. /// In storage, we distinguish between "none, and no initializer expr", and /// "none, but an implicit initializer expr". unsigned StoredInitializationStyle : 2; /// True if the allocated type was expressed as a parenthesized type-id. unsigned IsParenTypeId : 1; /// The number of placement new arguments. 
unsigned NumPlacementArgs; }; class CXXDeleteExprBitfields { friend class ASTStmtReader; friend class CXXDeleteExpr; unsigned : NumExprBits; /// Is this a forced global delete, i.e. "::delete"? unsigned GlobalDelete : 1; /// Is this the array form of delete, i.e. "delete[]"? unsigned ArrayForm : 1; /// ArrayFormAsWritten can be different from ArrayForm if 'delete' is /// applied to pointer-to-array type (ArrayFormAsWritten will be false /// while ArrayForm will be true). unsigned ArrayFormAsWritten : 1; /// Does the usual deallocation function for the element type require /// a size_t argument? unsigned UsualArrayDeleteWantsSize : 1; /// Location of the expression. SourceLocation Loc; }; class TypeTraitExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class TypeTraitExpr; unsigned : NumExprBits; /// The kind of type trait, which is a value of a TypeTrait enumerator. unsigned Kind : 8; /// If this expression is not value-dependent, this indicates whether /// the trait evaluated true or false. unsigned Value : 1; /// The number of arguments to this type trait. unsigned NumArgs : 32 - 8 - 1 - NumExprBits; }; class DependentScopeDeclRefExprBitfields { friend class ASTStmtReader; friend class ASTStmtWriter; friend class DependentScopeDeclRefExpr; unsigned : NumExprBits; /// Whether the name includes info for explicit template /// keyword and arguments. unsigned HasTemplateKWAndArgsInfo : 1; }; class CXXConstructExprBitfields { friend class ASTStmtReader; friend class CXXConstructExpr; unsigned : NumExprBits; unsigned Elidable : 1; unsigned HadMultipleCandidates : 1; unsigned ListInitialization : 1; unsigned StdInitListInitialization : 1; unsigned ZeroInitialization : 1; unsigned ConstructionKind : 3; SourceLocation Loc; }; class ExprWithCleanupsBitfields { friend class ASTStmtReader; // deserialization friend class ExprWithCleanups; unsigned : NumExprBits; // When false, it must not have side effects. 
unsigned CleanupsHaveSideEffects : 1; unsigned NumObjects : 32 - 1 - NumExprBits; }; class CXXUnresolvedConstructExprBitfields { friend class ASTStmtReader; friend class CXXUnresolvedConstructExpr; unsigned : NumExprBits; /// The number of arguments used to construct the type. unsigned NumArgs; }; class CXXDependentScopeMemberExprBitfields { friend class ASTStmtReader; friend class CXXDependentScopeMemberExpr; unsigned : NumExprBits; /// Whether this member expression used the '->' operator or /// the '.' operator. unsigned IsArrow : 1; /// Whether this member expression has info for explicit template /// keyword and arguments. unsigned HasTemplateKWAndArgsInfo : 1; /// See getFirstQualifierFoundInScope() and the comment listing /// the trailing objects. unsigned HasFirstQualifierFoundInScope : 1; /// The location of the '->' or '.' operator. SourceLocation OperatorLoc; }; class OverloadExprBitfields { friend class ASTStmtReader; friend class OverloadExpr; unsigned : NumExprBits; /// Whether the name includes info for explicit template /// keyword and arguments. unsigned HasTemplateKWAndArgsInfo : 1; /// Padding used by the derived classes to store various bits. If you /// need to add some data here, shrink this padding and add your data /// above. NumOverloadExprBits also needs to be updated. unsigned : 32 - NumExprBits - 1; /// The number of results. unsigned NumResults; }; enum { NumOverloadExprBits = NumExprBits + 1 }; class UnresolvedLookupExprBitfields { friend class ASTStmtReader; friend class UnresolvedLookupExpr; unsigned : NumOverloadExprBits; /// True if these lookup results should be extended by /// argument-dependent lookup if this is the operand of a function call. unsigned RequiresADL : 1; /// True if these lookup results are overloaded. This is pretty trivially /// rederivable if we urgently need to kill this field. 
unsigned Overloaded : 1; }; static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4, "UnresolvedLookupExprBitfields must be <= than 4 bytes to" "avoid trashing OverloadExprBitfields::NumResults!"); class UnresolvedMemberExprBitfields { friend class ASTStmtReader; friend class UnresolvedMemberExpr; unsigned : NumOverloadExprBits; /// Whether this member expression used the '->' operator or /// the '.' operator. unsigned IsArrow : 1; /// Whether the lookup results contain an unresolved using declaration. unsigned HasUnresolvedUsing : 1; }; static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4, "UnresolvedMemberExprBitfields must be <= than 4 bytes to" "avoid trashing OverloadExprBitfields::NumResults!"); class CXXNoexceptExprBitfields { friend class ASTStmtReader; friend class CXXNoexceptExpr; unsigned : NumExprBits; unsigned Value : 1; }; class SubstNonTypeTemplateParmExprBitfields { friend class ASTStmtReader; friend class SubstNonTypeTemplateParmExpr; unsigned : NumExprBits; /// The location of the non-type template parameter reference. SourceLocation NameLoc; }; //===--- C++ Coroutines TS bitfields classes ---===// class CoawaitExprBitfields { friend class CoawaitExpr; unsigned : NumExprBits; unsigned IsImplicit : 1; }; //===--- Obj-C Expression bitfields classes ---===// class ObjCIndirectCopyRestoreExprBitfields { friend class ObjCIndirectCopyRestoreExpr; unsigned : NumExprBits; unsigned ShouldCopy : 1; }; //===--- Clang Extensions bitfields classes ---===// class OpaqueValueExprBitfields { friend class ASTStmtReader; friend class OpaqueValueExpr; unsigned : NumExprBits; /// The OVE is a unique semantic reference to its source expression if this /// bit is set to true. unsigned IsUnique : 1; SourceLocation Loc; }; union { // Same order as in StmtNodes.td. 
    // Statements
    StmtBitfields StmtBits;
    NullStmtBitfields NullStmtBits;
    CompoundStmtBitfields CompoundStmtBits;
    LabelStmtBitfields LabelStmtBits;
    AttributedStmtBitfields AttributedStmtBits;
    IfStmtBitfields IfStmtBits;
    SwitchStmtBitfields SwitchStmtBits;
    WhileStmtBitfields WhileStmtBits;
    DoStmtBitfields DoStmtBits;
    ForStmtBitfields ForStmtBits;
    GotoStmtBitfields GotoStmtBits;
    ContinueStmtBitfields ContinueStmtBits;
    BreakStmtBitfields BreakStmtBits;
    ReturnStmtBitfields ReturnStmtBits;
    SwitchCaseBitfields SwitchCaseBits;

    // Expressions
    ExprBitfields ExprBits;
    PredefinedExprBitfields PredefinedExprBits;
    DeclRefExprBitfields DeclRefExprBits;
    FloatingLiteralBitfields FloatingLiteralBits;
    StringLiteralBitfields StringLiteralBits;
    CharacterLiteralBitfields CharacterLiteralBits;
    UnaryOperatorBitfields UnaryOperatorBits;
    UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
    ArraySubscriptExprBitfields ArraySubscriptExprBits;
    CallExprBitfields CallExprBits;
    MemberExprBitfields MemberExprBits;
    CastExprBitfields CastExprBits;
    BinaryOperatorBitfields BinaryOperatorBits;
    InitListExprBitfields InitListExprBits;
    ParenListExprBitfields ParenListExprBits;
    PseudoObjectExprBitfields PseudoObjectExprBits;

    // C++ Expressions
    CXXOperatorCallExprBitfields CXXOperatorCallExprBits;
    CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits;
    CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits;
    CXXThisExprBitfields CXXThisExprBits;
    CXXThrowExprBitfields CXXThrowExprBits;
    CXXDefaultArgExprBitfields CXXDefaultArgExprBits;
    CXXDefaultInitExprBitfields CXXDefaultInitExprBits;
    CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits;
    CXXNewExprBitfields CXXNewExprBits;
    CXXDeleteExprBitfields CXXDeleteExprBits;
    TypeTraitExprBitfields TypeTraitExprBits;
    DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits;
    CXXConstructExprBitfields CXXConstructExprBits;
    ExprWithCleanupsBitfields ExprWithCleanupsBits;
    CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits;
    CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits;
    OverloadExprBitfields OverloadExprBits;
    UnresolvedLookupExprBitfields UnresolvedLookupExprBits;
    UnresolvedMemberExprBitfields UnresolvedMemberExprBits;
    CXXNoexceptExprBitfields CXXNoexceptExprBits;
    SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits;

    // C++ Coroutines TS expressions
    CoawaitExprBitfields CoawaitBits;

    // Obj-C Expressions
    ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;

    // Clang Extensions
    OpaqueValueExprBitfields OpaqueValueExprBits;
  };

public:
  // Only allow allocation of Stmts using the allocator in ASTContext
  // or by doing a placement new.
  void* operator new(size_t bytes, const ASTContext& C,
                     unsigned alignment = 8);

  void* operator new(size_t bytes, const ASTContext* C,
                     unsigned alignment = 8) {
    return operator new(bytes, *C, alignment);
  }

  // Raw placement new: simply returns the provided storage.
  void *operator new(size_t bytes, void *mem) noexcept { return mem; }

  // Matching operator deletes are intentionally no-ops; Stmts are allocated
  // with the ASTContext allocator (see above) and never freed individually.
  void operator delete(void *, const ASTContext &, unsigned) noexcept {}
  void operator delete(void *, const ASTContext *, unsigned) noexcept {}
  void operator delete(void *, size_t) noexcept {}
  void operator delete(void *, void *) noexcept {}

public:
  /// A placeholder type used to construct an empty shell of a
  /// type, that will be filled in later (e.g., by some
  /// de-serialization).
  struct EmptyShell {};

protected:
  /// Iterator for iterating over Stmt * arrays that contain only Expr *
  ///
  /// This is needed because AST nodes use Stmt* arrays to store
  /// references to children (to be compatible with StmtIterator).
  struct ExprIterator
      : llvm::iterator_adaptor_base<ExprIterator, Stmt **,
                                    std::random_access_iterator_tag, Expr *> {
    ExprIterator() : iterator_adaptor_base(nullptr) {}
    ExprIterator(Stmt **I) : iterator_adaptor_base(I) {}

    reference operator*() const {
      // Every element must actually be an Expr; checked via the StmtClass
      // range reserved for expressions.
      assert((*I)->getStmtClass() >= firstExprConstant &&
             (*I)->getStmtClass() <= lastExprConstant);
      return *reinterpret_cast<Expr **>(I);
    }
  };

  /// Const iterator for iterating over Stmt * arrays that contain only Expr *
  struct ConstExprIterator
      : llvm::iterator_adaptor_base<ConstExprIterator, const Stmt *const *,
                                    std::random_access_iterator_tag,
                                    const Expr *const> {
    ConstExprIterator() : iterator_adaptor_base(nullptr) {}
    ConstExprIterator(const Stmt *const *I) : iterator_adaptor_base(I) {}

    reference operator*() const {
      assert((*I)->getStmtClass() >= firstExprConstant &&
             (*I)->getStmtClass() <= lastExprConstant);
      return *reinterpret_cast<const Expr *const *>(I);
    }
  };

private:
  /// Whether statistic collection is enabled.
  static bool StatisticsEnabled;

protected:
  /// Construct an empty statement.
  explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  Stmt(StmtClass SC) {
    // Stmt's only data is the bitfield union; keep it to a single
    // pointer-aligned word so derived classes can pack data into it.
    static_assert(sizeof(*this) <= 8,
                  "changing bitfields changed sizeof(Stmt)");
    static_assert(sizeof(*this) % alignof(void *) == 0,
                  "Insufficient alignment!");
    StmtBits.sClass = SC;
    if (StatisticsEnabled) Stmt::addStmtClass(SC);
  }

  StmtClass getStmtClass() const {
    return static_cast<StmtClass>(StmtBits.sClass);
  }

  const char *getStmtClassName() const;

  /// SourceLocation tokens are not useful in isolation - they are low level
  /// value objects created/interpreted by SourceManager. We assume AST
  /// clients will have a pointer to the respective SourceManager.
  SourceRange getSourceRange() const LLVM_READONLY;
  SourceLocation getBeginLoc() const LLVM_READONLY;
  SourceLocation getEndLoc() const LLVM_READONLY;

  // global temp stats (until we have a per-module visitor)
  static void addStmtClass(const StmtClass s);
  static void EnableStatistics();
  static void PrintStats();

  /// Dumps the specified AST fragment and all subtrees to
  /// \c llvm::errs().
  void dump() const;
  void dump(SourceManager &SM) const;
  void dump(raw_ostream &OS, SourceManager &SM) const;
  void dump(raw_ostream &OS) const;

  /// \return Unique reproducible object identifier
  int64_t getID(const ASTContext &Context) const;

  /// dumpColor - same as dump(), but forces color highlighting.
  void dumpColor() const;

  /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
  /// back to its original source language syntax.
  void dumpPretty(const ASTContext &Context) const;
  void printPretty(raw_ostream &OS, PrinterHelper *Helper,
                   const PrintingPolicy &Policy, unsigned Indentation = 0,
                   StringRef NewlineSymbol = "\n",
                   const ASTContext *Context = nullptr) const;

  /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz.  Only
  /// works on systems with GraphViz (Mac OS X) or dot+gv installed.
  void viewAST() const;

  /// Skip past any implicit AST nodes which might surround this
  /// statement, such as ExprWithCleanups or ImplicitCastExpr nodes.
  Stmt *IgnoreImplicit();
  const Stmt *IgnoreImplicit() const {
    return const_cast<Stmt *>(this)->IgnoreImplicit();
  }

  /// Skip no-op (attributed, compound) container stmts and skip captured
  /// stmt at the top, if \a IgnoreCaptured is true.
  Stmt *IgnoreContainers(bool IgnoreCaptured = false);
  const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
    return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
  }

  const Stmt *stripLabelLikeStatements() const;
  Stmt *stripLabelLikeStatements() {
    return const_cast<Stmt *>(
        const_cast<const Stmt *>(this)->stripLabelLikeStatements());
  }

  /// Child Iterators: All subclasses must implement 'children'
  /// to permit easy iteration over the substatements/subexpressions of an
  /// AST node. This permits easy iteration over all nodes in the AST.
  using child_iterator = StmtIterator;
  using const_child_iterator = ConstStmtIterator;

  using child_range = llvm::iterator_range<child_iterator>;
  using const_child_range = llvm::iterator_range<const_child_iterator>;

  child_range children();

  const_child_range children() const {
    auto Children = const_cast<Stmt *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_iterator child_begin() { return children().begin(); }
  child_iterator child_end() { return children().end(); }

  const_child_iterator child_begin() const { return children().begin(); }
  const_child_iterator child_end() const { return children().end(); }

  /// Produce a unique representation of the given statement.
  ///
  /// \param ID once the profiling operation is complete, will contain
  /// the unique representation of the given statement.
  ///
  /// \param Context the AST context in which the statement resides
  ///
  /// \param Canonical whether the profile should be based on the canonical
  /// representation of this statement (e.g., where non-type template
  /// parameters are identified by index/level rather than their
  /// declaration pointers) or the exact representation of the statement as
  /// written in the source.
  void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
               bool Canonical) const;

  /// Calculate a unique representation for a statement that is
  /// stable across compiler invocations.
  ///
  /// \param ID profile information will be stored in ID.
  ///
  /// \param Hash an ODRHash object which will be called where pointers would
  /// have been used in the Profile function.
  void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};

/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
  DeclGroupRef DG;
  SourceLocation StartLoc, EndLoc;

public:
  DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
      : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}

  /// Build an empty declaration statement.
  explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}

  /// isSingleDecl - This method returns true if this DeclStmt refers
  /// to a single Decl.
  bool isSingleDecl() const { return DG.isSingleDecl(); }

  const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
  Decl *getSingleDecl() { return DG.getSingleDecl(); }

  const DeclGroupRef getDeclGroup() const { return DG; }
  DeclGroupRef getDeclGroup() { return DG; }
  void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }

  void setStartLoc(SourceLocation L) { StartLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DeclStmtClass;
  }

  // Iterators over subexpressions.
  child_range children() {
    return child_range(child_iterator(DG.begin(), DG.end()),
                       child_iterator(DG.end(), DG.end()));
  }

  using decl_iterator = DeclGroupRef::iterator;
  using const_decl_iterator = DeclGroupRef::const_iterator;
  using decl_range = llvm::iterator_range<decl_iterator>;
  using decl_const_range = llvm::iterator_range<const_decl_iterator>;

  decl_range decls() { return decl_range(decl_begin(), decl_end()); }
  decl_const_range decls() const {
    return decl_const_range(decl_begin(), decl_end());
  }
  decl_iterator decl_begin() { return DG.begin(); }
  decl_iterator decl_end() { return DG.end(); }
  const_decl_iterator decl_begin() const { return DG.begin(); }
  const_decl_iterator decl_end() const { return DG.end(); }

  using reverse_decl_iterator = std::reverse_iterator<decl_iterator>;

  reverse_decl_iterator decl_rbegin() {
    return reverse_decl_iterator(decl_end());
  }
  reverse_decl_iterator decl_rend() {
    return reverse_decl_iterator(decl_begin());
  }
};

/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
public:
  NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
      : Stmt(NullStmtClass) {
    // Both the flag and the semicolon location are packed into NullStmtBits.
    NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro;
    setSemiLoc(L);
  }

  /// Build an empty null statement.
  explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {}

  SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; }
  void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; }

  bool hasLeadingEmptyMacro() const {
    return NullStmtBits.HasLeadingEmptyMacro;
  }

  SourceLocation getBeginLoc() const { return getSemiLoc(); }
  SourceLocation getEndLoc() const { return getSemiLoc(); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == NullStmtClass;
  }

  // A null statement has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};

/// CompoundStmt - This represents a group of statements like { stmt stmt }.
class CompoundStmt final
    : public Stmt,
      private llvm::TrailingObjects<CompoundStmt, Stmt *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  /// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits.
  SourceLocation RBraceLoc;

  CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB);
  explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {}

  void setStmts(ArrayRef<Stmt *> Stmts);

public:
  static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
                              SourceLocation LB, SourceLocation RB);

  // Build an empty compound statement with a location.
  explicit CompoundStmt(SourceLocation Loc)
      : Stmt(CompoundStmtClass), RBraceLoc(Loc) {
    // NumStmts and LBraceLoc live in the shared bitfield union.
    CompoundStmtBits.NumStmts = 0;
    CompoundStmtBits.LBraceLoc = Loc;
  }

  // Build an empty compound statement.
  static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);

  bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
  unsigned size() const { return CompoundStmtBits.NumStmts; }

  // The body is stored as a trailing array of Stmt * directly after this
  // object (see TrailingObjects base).
  using body_iterator = Stmt **;
  using body_range = llvm::iterator_range<body_iterator>;

  body_range body() { return body_range(body_begin(), body_end()); }
  body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
  body_iterator body_end() { return body_begin() + size(); }
  Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }

  Stmt *body_back() {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  void setLastStmt(Stmt *S) {
    assert(!body_empty() && "setLastStmt");
    body_begin()[size() - 1] = S;
  }

  using const_body_iterator = Stmt *const *;
  using body_const_range = llvm::iterator_range<const_body_iterator>;

  body_const_range body() const {
    return body_const_range(body_begin(), body_end());
  }

  const_body_iterator body_begin() const {
    return getTrailingObjects<Stmt *>();
  }

  const_body_iterator body_end() const { return body_begin() + size(); }

  const Stmt *body_front() const {
    return !body_empty() ? body_begin()[0] : nullptr;
  }

  const Stmt *body_back() const {
    return !body_empty() ? body_begin()[size() - 1] : nullptr;
  }

  using reverse_body_iterator = std::reverse_iterator<body_iterator>;

  reverse_body_iterator body_rbegin() {
    return reverse_body_iterator(body_end());
  }

  reverse_body_iterator body_rend() {
    return reverse_body_iterator(body_begin());
  }

  using const_reverse_body_iterator =
      std::reverse_iterator<const_body_iterator>;

  const_reverse_body_iterator body_rbegin() const {
    return const_reverse_body_iterator(body_end());
  }

  const_reverse_body_iterator body_rend() const {
    return const_reverse_body_iterator(body_begin());
  }

  SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getEndLoc() const { return RBraceLoc; }

  SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
  SourceLocation getRBracLoc() const { return RBraceLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CompoundStmtClass;
  }

  // Iterators
  child_range children() { return child_range(body_begin(), body_end()); }

  const_child_range children() const {
    return const_child_range(body_begin(), body_end());
  }
};

// SwitchCase is the base class for CaseStmt and DefaultStmt,
class SwitchCase : public Stmt {
protected:
  /// The location of the ":".
  SourceLocation ColonLoc;

  // The location of the "case" or "default" keyword. Stored in SwitchCaseBits.
  // SourceLocation KeywordLoc;

  /// A pointer to the following CaseStmt or DefaultStmt class,
  /// used by SwitchStmt.
  SwitchCase *NextSwitchCase = nullptr;

  SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
      : Stmt(SC), ColonLoc(ColonLoc) {
    setKeywordLoc(KWLoc);
  }

  SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
  SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
  void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }

  SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; }
  void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  // Defined out-of-line below, after CaseStmt and DefaultStmt are complete.
  inline Stmt *getSubStmt();
  const Stmt *getSubStmt() const {
    return const_cast<SwitchCase *>(this)->getSubStmt();
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  inline SourceLocation getEndLoc() const LLVM_READONLY;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass ||
           T->getStmtClass() == DefaultStmtClass;
  }
};

/// CaseStmt - Represent a case statement. It can optionally be a GNU case
/// statement of the form LHS ... RHS representing a range of cases.
class CaseStmt final
    : public SwitchCase,
      private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // CaseStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing objects
  // at the end but this would impact children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the LHS of the case statement. Always present.
  //
  // * A "Stmt *" for the RHS of the case statement. This is a GNU extension
  //   which allows ranges in case statements of the form LHS ... RHS.
  //   Present if and only if caseStmtIsGNURange() is true.
  //
  // * A "Stmt *" for the substatement of the case statement. Always present.
  //
  // * A SourceLocation for the location of the ... if this is a case statement
  //   with a range.
  //   Present if and only if caseStmtIsGNURange() is true.
  enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + caseStmtIsGNURange();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return caseStmtIsGNURange();
  }

  // Offsets into the trailing Stmt * array; the RHS slot only exists for
  // GNU range cases, which shifts the substatement slot by one.
  unsigned lhsOffset() const { return LhsOffset; }
  unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); }
  unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; }

  /// Build a case statement assuming that the storage for the
  /// trailing objects has been properly allocated.
  CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
           SourceLocation ellipsisLoc, SourceLocation colonLoc)
      : SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
    // Handle GNU case statements of the form LHS ... RHS.
    bool IsGNURange = rhs != nullptr;
    SwitchCaseBits.CaseStmtIsGNURange = IsGNURange;
    setLHS(lhs);
    setSubStmt(nullptr);
    if (IsGNURange) {
      setRHS(rhs);
      setEllipsisLoc(ellipsisLoc);
    }
  }

  /// Build an empty switch case statement.
  explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange)
      : SwitchCase(CaseStmtClass, Empty) {
    SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange;
  }

public:
  /// Build a case statement.
  static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
                          SourceLocation caseLoc, SourceLocation ellipsisLoc,
                          SourceLocation colonLoc);

  /// Build an empty case statement.
  static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange);

  /// True if this case statement is of the form case LHS ... RHS, which
  /// is a GNU extension. In this case the RHS can be obtained with getRHS()
  /// and the location of the ellipsis can be obtained with getEllipsisLoc().
  bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; }

  SourceLocation getCaseLoc() const { return getKeywordLoc(); }
  void setCaseLoc(SourceLocation L) { setKeywordLoc(L); }

  /// Get the location of the ... in a case statement of the form LHS ... RHS.
  SourceLocation getEllipsisLoc() const {
    return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>()
                                : SourceLocation();
  }

  /// Set the location of the ... in a case statement of the form LHS ... RHS.
  /// Assert that this case statement is of this form.
  void setEllipsisLoc(SourceLocation L) {
    assert(
        caseStmtIsGNURange() &&
        "setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!");
    *getTrailingObjects<SourceLocation>() = L;
  }

  Expr *getLHS() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  const Expr *getLHS() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
  }

  void setLHS(Expr *Val) {
    getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  Expr *getRHS() {
    return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
                                      getTrailingObjects<Stmt *>()[rhsOffset()])
                                : nullptr;
  }

  const Expr *getRHS() const {
    return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
                                      getTrailingObjects<Stmt *>()[rhsOffset()])
                                : nullptr;
  }

  void setRHS(Expr *Val) {
    assert(caseStmtIsGNURange() &&
           "setRHS but this is not a case stmt of the form LHS ... RHS!");
    getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val);
  }

  Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; }
  const Stmt *getSubStmt() const {
    return getTrailingObjects<Stmt *>()[subStmtOffset()];
  }

  void setSubStmt(Stmt *S) {
    getTrailingObjects<Stmt *>()[subStmtOffset()] = S;
  }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    // Handle deeply nested case statements with iteration instead of recursion.
    const CaseStmt *CS = this;
    while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
      CS = CS2;

    return CS->getSubStmt()->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }
};

class DefaultStmt : public SwitchCase {
  Stmt *SubStmt;

public:
  DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
      : SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}

  /// Build an empty default statement.
  explicit DefaultStmt(EmptyShell Empty)
      : SwitchCase(DefaultStmtClass, Empty) {}

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *S) { SubStmt = S; }

  SourceLocation getDefaultLoc() const { return getKeywordLoc(); }
  void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); }

  SourceLocation getBeginLoc() const { return getKeywordLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return SubStmt->getEndLoc();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DefaultStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
};

// Out-of-line SwitchCase members that need the complete derived types.
SourceLocation SwitchCase::getEndLoc() const {
  if (const auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getEndLoc();
  else if (const auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getEndLoc();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}

Stmt *SwitchCase::getSubStmt() {
  if (auto *CS = dyn_cast<CaseStmt>(this))
    return CS->getSubStmt();
  else if (auto *DS = dyn_cast<DefaultStmt>(this))
    return DS->getSubStmt();
  llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}

/// LabelStmt - Represents a label, which has a substatement. For example:
///    foo: return;
class LabelStmt : public Stmt {
  LabelDecl *TheDecl;
  Stmt *SubStmt;

public:
  /// Build a label statement.
  LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
      : Stmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) {
    setIdentLoc(IL);
  }

  /// Build an empty label statement.
  explicit LabelStmt(EmptyShell Empty) : Stmt(LabelStmtClass, Empty) {}

  SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; }
  void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; }

  LabelDecl *getDecl() const { return TheDecl; }
  void setDecl(LabelDecl *D) { TheDecl = D; }

  const char *getName() const;
  Stmt *getSubStmt() { return SubStmt; }

  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *SS) { SubStmt = SS; }

  SourceLocation getBeginLoc() const { return getIdentLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == LabelStmtClass;
  }
};

/// Represents an attribute applied to a statement. For example:
///   [[omp::for(...)]] for (...) { ... }
class AttributedStmt final
    : public Stmt,
      private llvm::TrailingObjects<AttributedStmt, const Attr *> {
  friend class ASTStmtReader;
  friend TrailingObjects;

  Stmt *SubStmt;

  AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs,
                 Stmt *SubStmt)
      : Stmt(AttributedStmtClass), SubStmt(SubStmt) {
    AttributedStmtBits.NumAttrs = Attrs.size();
    AttributedStmtBits.AttrLoc = Loc;
    std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
  }

  explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
      : Stmt(AttributedStmtClass, Empty) {
    AttributedStmtBits.NumAttrs = NumAttrs;
    AttributedStmtBits.AttrLoc = SourceLocation{};
    std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
  }

  // The attributes are stored as a trailing array of const Attr *.
  const Attr *const *getAttrArrayPtr() const {
    return getTrailingObjects<const Attr *>();
  }
  const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }

public:
  static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
                                ArrayRef<const Attr *> Attrs, Stmt *SubStmt);

  // Build an empty attributed statement.
  static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);

  SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; }
  ArrayRef<const Attr *> getAttrs() const {
    return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs);
  }

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }

  SourceLocation getBeginLoc() const { return getAttrLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == AttributedStmtClass;
  }
};

/// IfStmt - This represents an if/then/else.
class IfStmt final
    : public Stmt,
      private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
  friend TrailingObjects;

  // IfStmt is followed by several trailing objects, some of which optional.
  // Note that it would be more convenient to put the optional trailing
  // objects at the end but this would change the order of the children.
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact a "Expr *".
  //
  // * A "Stmt *" for the then statement.
  //   Always present.
  //
  // * A "Stmt *" for the else statement.
  //   Present if and only if hasElseStorage().
  //
  // * A "SourceLocation" for the location of the "else".
  //   Present if and only if hasElseStorage().
  enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
           hasInitStorage();
  }

  unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
    return hasElseStorage();
  }

  // Offsets into the trailing Stmt * array; each optional slot shifts the
  // slots after it by one.
  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
  unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }

  /// Build an if/then/else statement.
  IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init,
         VarDecl *Var, Expr *Cond, Stmt *Then, SourceLocation EL, Stmt *Else);

  /// Build an empty if/then/else statement.
  explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit);

public:
  /// Create an IfStmt.
  static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL,
                        bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
                        Stmt *Then, SourceLocation EL = SourceLocation(),
                        Stmt *Else = nullptr);

  /// Create an empty IfStmt optionally with storage for an else statement,
  /// condition variable and init expression.
  static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
                             bool HasInit);

  /// True if this IfStmt has the storage for an init statement.
  bool hasInitStorage() const { return IfStmtBits.HasInit; }

  /// True if this IfStmt has storage for a variable declaration.
  bool hasVarStorage() const { return IfStmtBits.HasVar; }

  /// True if this IfStmt has storage for an else statement.
  bool hasElseStorage() const { return IfStmtBits.HasElse; }

  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; }
  const Stmt *getThen() const {
    return getTrailingObjects<Stmt *>()[thenOffset()];
  }

  void setThen(Stmt *Then) {
    getTrailingObjects<Stmt *>()[thenOffset()] = Then;
  }

  Stmt *getElse() {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  const Stmt *getElse() const {
    return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
                            : nullptr;
  }

  void setElse(Stmt *Else) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    getTrailingObjects<Stmt *>()[elseOffset()] = Else;
  }

  /// Retrieve the variable declared in this "if" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// if (int x = foo()) {
  ///   printf("x is %d", x);
  /// }
  /// \endcode
  VarDecl *getConditionVariable();
  const VarDecl *getConditionVariable() const {
    return const_cast<IfStmt *>(this)->getConditionVariable();
  }

  /// Set the condition variable for this if statement.
  /// The if statement must have storage for the condition variable.
  void setConditionVariable(const ASTContext &Ctx, VarDecl *V);

  /// If this IfStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  DeclStmt *getConditionVariableDeclStmt() {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  const DeclStmt *getConditionVariableDeclStmt() const {
    return hasVarStorage() ? static_cast<DeclStmt *>(
                                 getTrailingObjects<Stmt *>()[varOffset()])
                           : nullptr;
  }

  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  void setInit(Stmt *Init) {
    assert(hasInitStorage() &&
           "This if statement has no storage for an init statement!");
    getTrailingObjects<Stmt *>()[initOffset()] = Init;
  }

  SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; }
  void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; }

  SourceLocation getElseLoc() const {
    return hasElseStorage() ?
               *getTrailingObjects<SourceLocation>() : SourceLocation();
  }

  void setElseLoc(SourceLocation ElseLoc) {
    assert(hasElseStorage() &&
           "This if statement has no storage for an else statement!");
    *getTrailingObjects<SourceLocation>() = ElseLoc;
  }

  bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
  void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }

  bool isObjCAvailabilityCheck() const;

  SourceLocation getBeginLoc() const { return getIfLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    if (getElse())
      return getElse()->getEndLoc();
    return getThen()->getEndLoc();
  }

  // Iterators over subexpressions. The iterators will include iterating
  // over the initialization expression referenced by the condition variable.
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IfStmtClass;
  }
};

/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt final : public Stmt,
                         private llvm::TrailingObjects<SwitchStmt, Stmt *> {
  friend TrailingObjects;

  /// Points to a linked list of case and default statements.
  SwitchCase *FirstCase;

  // SwitchStmt is followed by several trailing objects,
  // some of which optional. Note that it would be more convenient to
  // put the optional trailing objects at the end but this would change
  // the order in children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the init statement.
  //   Present if and only if hasInitStorage().
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  // Offsets of the trailing "Stmt *" objects: the optional init statement and
  // condition variable come first, followed by the mandatory condition and
  // body (see the trailing-object layout comment above).
  enum { InitOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage();
  }

  unsigned initOffset() const { return InitOffset; }
  unsigned varOffset() const { return InitOffset + hasInitStorage(); }
  unsigned condOffset() const {
    return InitOffset + hasInitStorage() + hasVarStorage();
  }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  /// Build a switch statement.
  SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond);

  /// Build an empty switch statement.
  explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar);

public:
  /// Create a switch statement.
  static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
                            Expr *Cond);

  /// Create an empty switch statement optionally with storage for
  /// an init expression and a condition variable.
  static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit,
                                 bool HasVar);

  /// True if this SwitchStmt has storage for an init statement.
  bool hasInitStorage() const { return SwitchStmtBits.HasInit; }

  /// True if this SwitchStmt has storage for a condition variable.
  bool hasVarStorage() const { return SwitchStmtBits.HasVar; }

  Expr *getCond() {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
  }

  void setCond(Expr *Cond) {
    getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
  }

  Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
  const Stmt *getBody() const {
    return getTrailingObjects<Stmt *>()[bodyOffset()];
  }

  void setBody(Stmt *Body) {
    getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
  }

  Stmt *getInit() {
    return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                            : nullptr;
  }

  const Stmt *getInit() const {
    return hasInitStorage() ?
getTrailingObjects<Stmt *>()[initOffset()] : nullptr; } void setInit(Stmt *Init) { assert(hasInitStorage() && "This switch statement has no storage for an init statement!"); getTrailingObjects<Stmt *>()[initOffset()] = Init; } /// Retrieve the variable declared in this "switch" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// switch (int x = foo()) { /// case 0: break; /// // ... /// } /// \endcode VarDecl *getConditionVariable(); const VarDecl *getConditionVariable() const { return const_cast<SwitchStmt *>(this)->getConditionVariable(); } /// Set the condition variable in this switch statement. /// The switch statement must have storage for it. void setConditionVariable(const ASTContext &Ctx, VarDecl *VD); /// If this SwitchStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. DeclStmt *getConditionVariableDeclStmt() { return hasVarStorage() ? static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } const DeclStmt *getConditionVariableDeclStmt() const { return hasVarStorage() ? static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } SwitchCase *getSwitchCaseList() { return FirstCase; } const SwitchCase *getSwitchCaseList() const { return FirstCase; } void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; } SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; } void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; } void setBody(Stmt *S, SourceLocation SL) { setBody(S); setSwitchLoc(SL); } void addSwitchCase(SwitchCase *SC) { assert(!SC->getNextSwitchCase() && "case/default already added to a switch"); SC->setNextSwitchCase(FirstCase); FirstCase = SC; } /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a /// switch over an enum value then all cases have been explicitly covered. 
  void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; }

  /// Returns true if the SwitchStmt is a switch of an enum value and all cases
  /// have been explicitly covered.
  bool isAllEnumCasesCovered() const {
    return SwitchStmtBits.AllEnumCasesCovered;
  }

  SourceLocation getBeginLoc() const { return getSwitchLoc(); }
  SourceLocation getEndLoc() const LLVM_READONLY {
    // A present body ends the statement; otherwise fall back to the end of
    // the condition expression.
    return getBody() ? getBody()->getEndLoc()
                     : reinterpret_cast<const Stmt *>(getCond())->getEndLoc();
  }

  // Iterators
  child_range children() {
    return child_range(getTrailingObjects<Stmt *>(),
                       getTrailingObjects<Stmt *>() +
                           numTrailingObjects(OverloadToken<Stmt *>()));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SwitchStmtClass;
  }
};

/// WhileStmt - This represents a 'while' stmt.
class WhileStmt final : public Stmt,
                        private llvm::TrailingObjects<WhileStmt, Stmt *> {
  friend TrailingObjects;

  // WhileStmt is followed by several trailing objects,
  // some of which are optional. Note that it would be more
  // convenient to put the optional trailing object at the end
  // but this would affect children().
  // The trailing objects are in order:
  //
  // * A "Stmt *" for the condition variable.
  //   Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
  //
  // * A "Stmt *" for the condition.
  //   Always present. This is in fact an "Expr *".
  //
  // * A "Stmt *" for the body.
  //   Always present.
  //
  enum { VarOffset = 0, BodyOffsetFromCond = 1 };
  enum { NumMandatoryStmtPtr = 2 };

  unsigned varOffset() const { return VarOffset; }
  unsigned condOffset() const { return VarOffset + hasVarStorage(); }
  unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }

  unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
    return NumMandatoryStmtPtr + hasVarStorage();
  }

  /// Build a while statement.
  WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body,
            SourceLocation WL);

  /// Build an empty while statement.
explicit WhileStmt(EmptyShell Empty, bool HasVar); public: /// Create a while statement. static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body, SourceLocation WL); /// Create an empty while statement optionally with storage for /// a condition variable. static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar); /// True if this WhileStmt has storage for a condition variable. bool hasVarStorage() const { return WhileStmtBits.HasVar; } Expr *getCond() { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]); } const Expr *getCond() const { return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]); } void setCond(Expr *Cond) { getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond); } Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; } const Stmt *getBody() const { return getTrailingObjects<Stmt *>()[bodyOffset()]; } void setBody(Stmt *Body) { getTrailingObjects<Stmt *>()[bodyOffset()] = Body; } /// Retrieve the variable declared in this "while" statement, if any. /// /// In the following example, "x" is the condition variable. /// \code /// while (int x = random()) { /// // ... /// } /// \endcode VarDecl *getConditionVariable(); const VarDecl *getConditionVariable() const { return const_cast<WhileStmt *>(this)->getConditionVariable(); } /// Set the condition variable of this while statement. /// The while statement must have storage for it. void setConditionVariable(const ASTContext &Ctx, VarDecl *V); /// If this WhileStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. DeclStmt *getConditionVariableDeclStmt() { return hasVarStorage() ? static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } const DeclStmt *getConditionVariableDeclStmt() const { return hasVarStorage() ? 
static_cast<DeclStmt *>( getTrailingObjects<Stmt *>()[varOffset()]) : nullptr; } SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; } void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; } SourceLocation getBeginLoc() const { return getWhileLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return getBody()->getEndLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == WhileStmtClass; } // Iterators child_range children() { return child_range(getTrailingObjects<Stmt *>(), getTrailingObjects<Stmt *>() + numTrailingObjects(OverloadToken<Stmt *>())); } }; /// DoStmt - This represents a 'do/while' stmt. class DoStmt : public Stmt { enum { BODY, COND, END_EXPR }; Stmt *SubExprs[END_EXPR]; SourceLocation WhileLoc; SourceLocation RParenLoc; // Location of final ')' in do stmt condition. public: DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL, SourceLocation RP) : Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) { setCond(Cond); setBody(Body); setDoLoc(DL); } /// Build an empty do-while statement. 
explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {} Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); } const Expr *getCond() const { return reinterpret_cast<Expr *>(SubExprs[COND]); } void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getBody() const { return SubExprs[BODY]; } void setBody(Stmt *Body) { SubExprs[BODY] = Body; } SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; } void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; } SourceLocation getWhileLoc() const { return WhileLoc; } void setWhileLoc(SourceLocation L) { WhileLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getBeginLoc() const { return getDoLoc(); } SourceLocation getEndLoc() const { return getRParenLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == DoStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR); } }; /// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of /// the init/cond/inc parts of the ForStmt will be null if they were not /// specified in the source. class ForStmt : public Stmt { enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR }; Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt. SourceLocation LParenLoc, RParenLoc; public: ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar, Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP, SourceLocation RP); /// Build an empty for statement. explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {} Stmt *getInit() { return SubExprs[INIT]; } /// Retrieve the variable declared in this "for" statement, if any. /// /// In the following example, "y" is the condition variable. /// \code /// for (int x = random(); int y = mangle(x); ++x) { /// // ... 
/// } /// \endcode VarDecl *getConditionVariable() const; void setConditionVariable(const ASTContext &C, VarDecl *V); /// If this ForStmt has a condition variable, return the faux DeclStmt /// associated with the creation of that condition variable. const DeclStmt *getConditionVariableDeclStmt() const { return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]); } Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); } Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); } Stmt *getBody() { return SubExprs[BODY]; } const Stmt *getInit() const { return SubExprs[INIT]; } const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);} const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); } const Stmt *getBody() const { return SubExprs[BODY]; } void setInit(Stmt *S) { SubExprs[INIT] = S; } void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); } void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); } void setBody(Stmt *S) { SubExprs[BODY] = S; } SourceLocation getForLoc() const { return ForStmtBits.ForLoc; } void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; } SourceLocation getLParenLoc() const { return LParenLoc; } void setLParenLoc(SourceLocation L) { LParenLoc = L; } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } SourceLocation getBeginLoc() const { return getForLoc(); } SourceLocation getEndLoc() const { return getBody()->getEndLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == ForStmtClass; } // Iterators child_range children() { return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR); } }; /// GotoStmt - This represents a direct goto. class GotoStmt : public Stmt { LabelDecl *Label; SourceLocation LabelLoc; public: GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL) : Stmt(GotoStmtClass), Label(label), LabelLoc(LL) { setGotoLoc(GL); } /// Build an empty goto statement. 
explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {} LabelDecl *getLabel() const { return Label; } void setLabel(LabelDecl *D) { Label = D; } SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; } void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; } SourceLocation getLabelLoc() const { return LabelLoc; } void setLabelLoc(SourceLocation L) { LabelLoc = L; } SourceLocation getBeginLoc() const { return getGotoLoc(); } SourceLocation getEndLoc() const { return getLabelLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == GotoStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// IndirectGotoStmt - This represents an indirect goto. class IndirectGotoStmt : public Stmt { SourceLocation StarLoc; Stmt *Target; public: IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target) : Stmt(IndirectGotoStmtClass), StarLoc(starLoc) { setTarget(target); setGotoLoc(gotoLoc); } /// Build an empty indirect goto statement. explicit IndirectGotoStmt(EmptyShell Empty) : Stmt(IndirectGotoStmtClass, Empty) {} void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; } SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; } void setStarLoc(SourceLocation L) { StarLoc = L; } SourceLocation getStarLoc() const { return StarLoc; } Expr *getTarget() { return reinterpret_cast<Expr *>(Target); } const Expr *getTarget() const { return reinterpret_cast<const Expr *>(Target); } void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); } /// getConstantTarget - Returns the fixed target of this indirect /// goto, if one exists. 
LabelDecl *getConstantTarget(); const LabelDecl *getConstantTarget() const { return const_cast<IndirectGotoStmt *>(this)->getConstantTarget(); } SourceLocation getBeginLoc() const { return getGotoLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == IndirectGotoStmtClass; } // Iterators child_range children() { return child_range(&Target, &Target + 1); } }; /// ContinueStmt - This represents a continue. class ContinueStmt : public Stmt { public: ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) { setContinueLoc(CL); } /// Build an empty continue statement. explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {} SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; } void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; } SourceLocation getBeginLoc() const { return getContinueLoc(); } SourceLocation getEndLoc() const { return getContinueLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == ContinueStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// BreakStmt - This represents a break. class BreakStmt : public Stmt { public: BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) { setBreakLoc(BL); } /// Build an empty break statement. 
explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {} SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; } void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; } SourceLocation getBeginLoc() const { return getBreakLoc(); } SourceLocation getEndLoc() const { return getBreakLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == BreakStmtClass; } // Iterators child_range children() { return child_range(child_iterator(), child_iterator()); } }; /// ReturnStmt - This represents a return, optionally of an expression: /// return; /// return 4; /// /// Note that GCC allows return with no argument in a function declared to /// return a value, and it allows returning a value in functions declared to /// return void. We explicitly model this in the AST, which means you can't /// depend on the return type of the function and the presence of an argument. class ReturnStmt final : public Stmt, private llvm::TrailingObjects<ReturnStmt, const VarDecl *> { friend TrailingObjects; /// The return expression. Stmt *RetExpr; // ReturnStmt is followed optionally by a trailing "const VarDecl *" // for the NRVO candidate. Present if and only if hasNRVOCandidate(). /// True if this ReturnStmt has storage for an NRVO candidate. bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; } unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const { return hasNRVOCandidate(); } /// Build a return statement. ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate); /// Build an empty return statement. explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate); public: /// Create a return statement. static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate); /// Create an empty return statement, optionally with /// storage for an NRVO candidate. 
static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate); Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); } const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); } void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); } /// Retrieve the variable that might be used for the named return /// value optimization. /// /// The optimization itself can only be performed if the variable is /// also marked as an NRVO object. const VarDecl *getNRVOCandidate() const { return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>() : nullptr; } /// Set the variable that might be used for the named return value /// optimization. The return statement must have storage for it, /// which is the case if and only if hasNRVOCandidate() is true. void setNRVOCandidate(const VarDecl *Var) { assert(hasNRVOCandidate() && "This return statement has no storage for an NRVO candidate!"); *getTrailingObjects<const VarDecl *>() = Var; } SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; } void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; } SourceLocation getBeginLoc() const { return getReturnLoc(); } SourceLocation getEndLoc() const LLVM_READONLY { return RetExpr ? RetExpr->getEndLoc() : getReturnLoc(); } static bool classof(const Stmt *T) { return T->getStmtClass() == ReturnStmtClass; } // Iterators child_range children() { if (RetExpr) return child_range(&RetExpr, &RetExpr + 1); return child_range(child_iterator(), child_iterator()); } }; /// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt. class AsmStmt : public Stmt { protected: friend class ASTStmtReader; SourceLocation AsmLoc; /// True if the assembly statement does not have any input or output /// operands. bool IsSimple; /// If true, treat this inline assembly as having side effects. /// This assembly statement should not be optimized, deleted or moved. 
bool IsVolatile; unsigned NumOutputs; unsigned NumInputs; unsigned NumClobbers; Stmt **Exprs = nullptr; AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, unsigned numclobbers) : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile), NumOutputs(numoutputs), NumInputs(numinputs), NumClobbers(numclobbers) {} public: /// Build an empty inline-assembly statement. explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {} SourceLocation getAsmLoc() const { return AsmLoc; } void setAsmLoc(SourceLocation L) { AsmLoc = L; } bool isSimple() const { return IsSimple; } void setSimple(bool V) { IsSimple = V; } bool isVolatile() const { return IsVolatile; } void setVolatile(bool V) { IsVolatile = V; } SourceLocation getBeginLoc() const LLVM_READONLY { return {}; } SourceLocation getEndLoc() const LLVM_READONLY { return {}; } //===--- Asm String Analysis ---===// /// Assemble final IR asm string. std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// unsigned getNumOutputs() const { return NumOutputs; } /// getOutputConstraint - Return the constraint string for the specified /// output operand. All output constraints are known to be non-empty (either /// '=' or '+'). StringRef getOutputConstraint(unsigned i) const; /// isOutputPlusConstraint - Return true if the specified output constraint /// is a "+" constraint (which is both an input and an output) or false if it /// is an "=" constraint (just an output). bool isOutputPlusConstraint(unsigned i) const { return getOutputConstraint(i)[0] == '+'; } const Expr *getOutputExpr(unsigned i) const; /// getNumPlusOperands - Return the number of output operands that have a "+" /// constraint. unsigned getNumPlusOperands() const; //===--- Input operands ---===// unsigned getNumInputs() const { return NumInputs; } /// getInputConstraint - Return the specified input constraint. 
Unlike output /// constraints, these can be empty. StringRef getInputConstraint(unsigned i) const; const Expr *getInputExpr(unsigned i) const; //===--- Other ---===// unsigned getNumClobbers() const { return NumClobbers; } StringRef getClobber(unsigned i) const; static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass || T->getStmtClass() == MSAsmStmtClass; } // Input expr iterators. using inputs_iterator = ExprIterator; using const_inputs_iterator = ConstExprIterator; using inputs_range = llvm::iterator_range<inputs_iterator>; using inputs_const_range = llvm::iterator_range<const_inputs_iterator>; inputs_iterator begin_inputs() { return &Exprs[0] + NumOutputs; } inputs_iterator end_inputs() { return &Exprs[0] + NumOutputs + NumInputs; } inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); } const_inputs_iterator begin_inputs() const { return &Exprs[0] + NumOutputs; } const_inputs_iterator end_inputs() const { return &Exprs[0] + NumOutputs + NumInputs; } inputs_const_range inputs() const { return inputs_const_range(begin_inputs(), end_inputs()); } // Output expr iterators. using outputs_iterator = ExprIterator; using const_outputs_iterator = ConstExprIterator; using outputs_range = llvm::iterator_range<outputs_iterator>; using outputs_const_range = llvm::iterator_range<const_outputs_iterator>; outputs_iterator begin_outputs() { return &Exprs[0]; } outputs_iterator end_outputs() { return &Exprs[0] + NumOutputs; } outputs_range outputs() { return outputs_range(begin_outputs(), end_outputs()); } const_outputs_iterator begin_outputs() const { return &Exprs[0]; } const_outputs_iterator end_outputs() const { return &Exprs[0] + NumOutputs; } outputs_const_range outputs() const { return outputs_const_range(begin_outputs(), end_outputs()); } child_range children() { return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs); } }; /// This represents a GCC inline-assembly statement extension. 
class GCCAsmStmt : public AsmStmt { friend class ASTStmtReader; SourceLocation RParenLoc; StringLiteral *AsmStr; // FIXME: If we wanted to, we could allocate all of these in one big array. StringLiteral **Constraints = nullptr; StringLiteral **Clobbers = nullptr; IdentifierInfo **Names = nullptr; public: GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, IdentifierInfo **names, StringLiteral **constraints, Expr **exprs, StringLiteral *asmstr, unsigned numclobbers, StringLiteral **clobbers, SourceLocation rparenloc); /// Build an empty inline-assembly statement. explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {} SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } //===--- Asm String Analysis ---===// const StringLiteral *getAsmString() const { return AsmStr; } StringLiteral *getAsmString() { return AsmStr; } void setAsmString(StringLiteral *E) { AsmStr = E; } /// AsmStringPiece - this is part of a decomposed asm string specification /// (for use with the AnalyzeAsmString function below). An asm string is /// considered to be a concatenation of these parts. class AsmStringPiece { public: enum Kind { String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%". Operand // Operand reference, with optional modifier %c4. }; private: Kind MyKind; std::string Str; unsigned OperandNo; // Source range for operand references. 
    CharSourceRange Range;

  public:
    AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
    AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
                   SourceLocation End)
        : MyKind(Operand), Str(S), OperandNo(OpNo),
          Range(CharSourceRange::getCharRange(Begin, End)) {}

    bool isString() const { return MyKind == String; }
    bool isOperand() const { return MyKind == Operand; }

    const std::string &getString() const { return Str; }

    unsigned getOperandNo() const {
      assert(isOperand());
      return OperandNo;
    }

    CharSourceRange getRange() const {
      assert(isOperand() && "Range is currently used only for Operands.");
      return Range;
    }

    /// getModifier - Get the modifier for this operand, if present.  This
    /// returns '\0' if there was no modifier.
    char getModifier() const;
  };

  /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
  /// it into pieces.  If the asm string is erroneous, emit errors and return
  /// true, otherwise return false.  This handles canonicalization and
  /// translation of strings from GCC syntax to LLVM IR syntax, and handles
  /// flattening of named references like %[foo] to Operand AsmStringPiece's.
  ///
  /// NOTE(review): the return type is 'unsigned', not 'bool' as the
  /// true/false wording above suggests -- presumably a nonzero value signals
  /// the error; confirm against the implementation before relying on it.
  unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
                            const ASTContext &C, unsigned &DiagOffs) const;

  /// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; } StringRef getOutputName(unsigned i) const { if (IdentifierInfo *II = getOutputIdentifier(i)) return II->getName(); return {}; } StringRef getOutputConstraint(unsigned i) const; const StringLiteral *getOutputConstraintLiteral(unsigned i) const { return Constraints[i]; } StringLiteral *getOutputConstraintLiteral(unsigned i) { return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// IdentifierInfo *getInputIdentifier(unsigned i) const { return Names[i + NumOutputs]; } StringRef getInputName(unsigned i) const { if (IdentifierInfo *II = getInputIdentifier(i)) return II->getName(); return {}; } StringRef getInputConstraint(unsigned i) const; const StringLiteral *getInputConstraintLiteral(unsigned i) const { return Constraints[i + NumOutputs]; } StringLiteral *getInputConstraintLiteral(unsigned i) { return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getInputExpr(i); } private: void setOutputsAndInputsAndClobbers(const ASTContext &C, IdentifierInfo **Names, StringLiteral **Constraints, Stmt **Exprs, unsigned NumOutputs, unsigned NumInputs, StringLiteral **Clobbers, unsigned NumClobbers); public: //===--- Other ---===// /// getNamedOperand - Given a symbolic operand reference like %[foo], /// translate this into a numeric value needed to reference the same operand. /// This returns -1 if the operand name is invalid. 
int getNamedOperand(StringRef SymbolicName) const; StringRef getClobber(unsigned i) const; StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; } const StringLiteral *getClobberStringLiteral(unsigned i) const { return Clobbers[i]; } SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; } SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass; } }; /// This represents a Microsoft inline-assembly statement extension. class MSAsmStmt : public AsmStmt { friend class ASTStmtReader; SourceLocation LBraceLoc, EndLoc; StringRef AsmStr; unsigned NumAsmToks = 0; Token *AsmToks = nullptr; StringRef *Constraints = nullptr; StringRef *Clobbers = nullptr; public: MSAsmStmt(const ASTContext &C, SourceLocation asmloc, SourceLocation lbraceloc, bool issimple, bool isvolatile, ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs, ArrayRef<StringRef> constraints, ArrayRef<Expr*> exprs, StringRef asmstr, ArrayRef<StringRef> clobbers, SourceLocation endloc); /// Build an empty MS-style inline-assembly statement. explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {} SourceLocation getLBraceLoc() const { return LBraceLoc; } void setLBraceLoc(SourceLocation L) { LBraceLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } bool hasBraces() const { return LBraceLoc.isValid(); } unsigned getNumAsmToks() { return NumAsmToks; } Token *getAsmToks() { return AsmToks; } //===--- Asm String Analysis ---===// StringRef getAsmString() const { return AsmStr; } /// Assemble final IR asm string. 
std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// StringRef getOutputConstraint(unsigned i) const { assert(i < NumOutputs); return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// StringRef getInputConstraint(unsigned i) const { assert(i < NumInputs); return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getInputExpr(i); } //===--- Other ---===// ArrayRef<StringRef> getAllConstraints() const { return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs); } ArrayRef<StringRef> getClobbers() const { return llvm::makeArrayRef(Clobbers, NumClobbers); } ArrayRef<Expr*> getAllExprs() const { return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs), NumInputs + NumOutputs); } StringRef getClobber(unsigned i) const { return getClobbers()[i]; } private: void initialize(const ASTContext &C, StringRef AsmString, ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints, ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers); public: SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == MSAsmStmtClass; } child_range children() { return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]); } }; class SEHExceptStmt : public Stmt { friend class ASTReader; friend class ASTStmtReader; SourceLocation Loc; Stmt *Children[2]; enum { FILTER_EXPR, BLOCK }; SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {} public: static SEHExceptStmt* Create(const ASTContext &C, SourceLocation ExceptLoc, Expr *FilterExpr, Stmt *Block); SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); } SourceLocation 
// --- continuation of SEHExceptStmt (class declaration begins in the previous
// --- chunk; this is the tail of its public interface) ---
getExceptLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); }

  // The controlling expression of the __except filter.
  Expr *getFilterExpr() const {
    return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
  }

  CompoundStmt *getBlock() const {
    return cast<CompoundStmt>(Children[BLOCK]);
  }

  child_range children() { return child_range(Children, Children+2); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHExceptStmtClass;
  }
};

/// Represents a Microsoft SEH __finally handler.
class SEHFinallyStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  SourceLocation Loc; // location of the '__finally' keyword
  Stmt *Block;        // the handler body (a CompoundStmt)

  SEHFinallyStmt(SourceLocation Loc, Stmt *Block);

  /// Shell constructor used by deserialization.
  explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {}

public:
  static SEHFinallyStmt* Create(const ASTContext &C,
                                SourceLocation FinallyLoc, Stmt *Block);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); }

  SourceLocation getFinallyLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return Block->getEndLoc(); }

  CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }

  child_range children() { return child_range(&Block,&Block+1); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHFinallyStmtClass;
  }
};

/// Represents a C++ 'try' or Microsoft SEH '__try' statement together with
/// its handler (an __except or __finally block).
class SEHTryStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  bool IsCXXTry;         // distinguishes 'try' from '__try'
  SourceLocation TryLoc;
  Stmt *Children[2];     // [TRY] = try block, [HANDLER] = except/finally

  enum { TRY = 0, HANDLER = 1 };

  SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
             SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler);

  /// Shell constructor used by deserialization.
  explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {}

public:
  static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
                            SourceLocation TryLoc, Stmt *TryBlock,
                            Stmt *Handler);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getTryLoc(); }

  SourceLocation getTryLoc() const { return TryLoc; }
  SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); }

  bool getIsCXXTry() const { return IsCXXTry; }

  CompoundStmt* getTryBlock() const {
    return cast<CompoundStmt>(Children[TRY]);
  }

  Stmt *getHandler() const { return Children[HANDLER]; }

  /// Returns 0 if not defined
  SEHExceptStmt  *getExceptHandler() const;
  SEHFinallyStmt *getFinallyHandler() const;

  child_range children() { return child_range(Children, Children+2); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHTryStmtClass;
  }
};

/// Represents a __leave statement.
class SEHLeaveStmt : public Stmt {
  SourceLocation LeaveLoc;

public:
  explicit SEHLeaveStmt(SourceLocation LL)
      : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}

  /// Build an empty __leave statement.
  explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {}

  SourceLocation getLeaveLoc() const { return LeaveLoc; }
  void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }

  SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; }
  SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHLeaveStmtClass;
  }

  // Iterators
  // __leave has no sub-statements, so the child range is empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
};

/// This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
///   compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
  /// The different capture forms: by 'this', by reference, capture for
  /// variable-length array type etc.
  enum VariableCaptureKind {
    VCK_This,
    VCK_ByRef,
    VCK_ByCopy,
    VCK_VLAType,
  };

  /// Describes the capture of either a variable, or 'this', or
  /// variable-length array type.
  class Capture {
    // Packs the captured variable pointer with the 2-bit capture kind.
    llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
    SourceLocation Loc;

  public:
    friend class ASTStmtReader;

    /// Create a new capture.
    ///
    /// \param Loc The source location associated with this capture.
    ///
    /// \param Kind The kind of capture (this, ByRef, ...).
    ///
    /// \param Var The variable being captured, or null if capturing this.
    Capture(SourceLocation Loc, VariableCaptureKind Kind,
            VarDecl *Var = nullptr);

    /// Determine the kind of capture.
    VariableCaptureKind getCaptureKind() const;

    /// Retrieve the source location at which the variable or 'this' was
    /// first used.
    SourceLocation getLocation() const { return Loc; }

    /// Determine whether this capture handles the C++ 'this' pointer.
    bool capturesThis() const { return getCaptureKind() == VCK_This; }

    /// Determine whether this capture handles a variable (by reference).
    bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }

    /// Determine whether this capture handles a variable by copy.
    bool capturesVariableByCopy() const {
      return getCaptureKind() == VCK_ByCopy;
    }

    /// Determine whether this capture handles a variable-length array
    /// type.
    bool capturesVariableArrayType() const {
      return getCaptureKind() == VCK_VLAType;
    }

    /// Retrieve the declaration of the variable being captured.
    ///
    /// This operation is only valid if this capture captures a variable.
    VarDecl *getCapturedVar() const;
  };

private:
  /// The number of variable captured, including 'this'.
  unsigned NumCaptures;

  /// The pointer part is the implicit the outlined function and the
  /// int part is the captured region kind, 'CR_Default' etc.
  llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind;

  /// The record for captured variables, a RecordDecl or CXXRecordDecl.
  RecordDecl *TheRecordDecl = nullptr;

  /// Construct a captured statement.
  CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
               ArrayRef<Expr *> CaptureInits, CapturedDecl *CD,
               RecordDecl *RD);

  /// Construct an empty captured statement.
  CapturedStmt(EmptyShell Empty, unsigned NumCaptures);

  // The capture-init expressions and the captured statement are stored in
  // trailing storage directly after this object.
  Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }

  Stmt *const *getStoredStmts() const {
    return reinterpret_cast<Stmt *const *>(this + 1);
  }

  Capture *getStoredCaptures() const;

  void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }

public:
  friend class ASTStmtReader;

  static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
                              CapturedRegionKind Kind,
                              ArrayRef<Capture> Captures,
                              ArrayRef<Expr *> CaptureInits,
                              CapturedDecl *CD, RecordDecl *RD);

  static CapturedStmt *CreateDeserialized(const ASTContext &Context,
                                          unsigned NumCaptures);

  /// Retrieve the statement being captured.
  // Stored immediately after the NumCaptures capture-init expressions.
  Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
  const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }

  /// Retrieve the outlined function declaration.
  CapturedDecl *getCapturedDecl();
  const CapturedDecl *getCapturedDecl() const;

  /// Set the outlined function declaration.
  void setCapturedDecl(CapturedDecl *D);

  /// Retrieve the captured region kind.
  CapturedRegionKind getCapturedRegionKind() const;

  /// Set the captured region kind.
  void setCapturedRegionKind(CapturedRegionKind Kind);

  /// Retrieve the record declaration for captured variables.
  const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }

  /// Set the record declaration for captured variables.
  void setCapturedRecordDecl(RecordDecl *D) {
    assert(D && "null RecordDecl");
    TheRecordDecl = D;
  }

  /// True if this variable has been captured.
  bool capturesVariable(const VarDecl *Var) const;

  /// An iterator that walks over the captures.
  using capture_iterator = Capture *;
  using const_capture_iterator = const Capture *;
  using capture_range = llvm::iterator_range<capture_iterator>;
  using capture_const_range = llvm::iterator_range<const_capture_iterator>;

  capture_range captures() {
    return capture_range(capture_begin(), capture_end());
  }
  capture_const_range captures() const {
    return capture_const_range(capture_begin(), capture_end());
  }

  /// Retrieve an iterator pointing to the first capture.
  capture_iterator capture_begin() { return getStoredCaptures(); }
  const_capture_iterator capture_begin() const { return getStoredCaptures(); }

  /// Retrieve an iterator pointing past the end of the sequence of
  /// captures.
  capture_iterator capture_end() const {
    return getStoredCaptures() + NumCaptures;
  }

  /// Retrieve the number of captures, including 'this'.
  unsigned capture_size() const { return NumCaptures; }

  /// Iterator that walks over the capture initialization arguments.
  using capture_init_iterator = Expr **;
  using capture_init_range = llvm::iterator_range<capture_init_iterator>;

  /// Const iterator that walks over the capture initialization
  /// arguments.
  using const_capture_init_iterator = Expr *const *;
  using const_capture_init_range =
      llvm::iterator_range<const_capture_init_iterator>;

  capture_init_range capture_inits() {
    return capture_init_range(capture_init_begin(), capture_init_end());
  }

  const_capture_init_range capture_inits() const {
    return const_capture_init_range(capture_init_begin(), capture_init_end());
  }

  /// Retrieve the first initialization argument.
  capture_init_iterator capture_init_begin() {
    return reinterpret_cast<Expr **>(getStoredStmts());
  }

  const_capture_init_iterator capture_init_begin() const {
    return reinterpret_cast<Expr *const *>(getStoredStmts());
  }

  /// Retrieve the iterator pointing one past the last initialization
  /// argument.
  capture_init_iterator capture_init_end() {
    return capture_init_begin() + NumCaptures;
  }

  const_capture_init_iterator capture_init_end() const {
    return capture_init_begin() + NumCaptures;
  }

  // Source range of a CapturedStmt is that of the statement it captures.
  SourceLocation getBeginLoc() const LLVM_READONLY {
    return getCapturedStmt()->getBeginLoc();
  }

  SourceLocation getEndLoc() const LLVM_READONLY {
    return getCapturedStmt()->getEndLoc();
  }

  SourceRange getSourceRange() const LLVM_READONLY {
    return getCapturedStmt()->getSourceRange();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CapturedStmtClass;
  }

  child_range children();
};

} // namespace clang

#endif // LLVM_CLANG_AST_STMT_H
openmp_sections.c
/* OpenMP "sections" clause example Jim Teresco, CS 338, Williams College, CS 341, Mount Holyoke College Mon Feb 24 22:30:57 EST 2003 Updated for CSIS-335, Siena College, Fall 2021 */ #include <stdio.h> #include <omp.h> int main(int argc, char *argv[]) { int thread_num; #pragma omp parallel sections private(thread_num) { #pragma omp section { thread_num = omp_get_thread_num(); printf("In this section, thread_num=%d\n", thread_num); } #pragma omp section { thread_num = omp_get_thread_num(); printf("But in this section, thread_num=%d\n", thread_num); } #pragma omp section { thread_num = omp_get_thread_num(); printf("And in this section, thread_num=%d\n", thread_num); } #pragma omp section { thread_num = omp_get_thread_num(); printf("Over here in this section, thread_num=%d\n", thread_num); } #pragma omp section { thread_num = omp_get_thread_num(); printf("Yet another section, with thread_num=%d\n", thread_num); } #pragma omp section { thread_num = omp_get_thread_num(); printf("Our last section, thread_num=%d\n", thread_num); } } return 0; }
GB_unop__identity_int8_fp64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_int8_fp64
// op(A') function:  GB_unop_tran__identity_int8_fp64

// C type:   int8_t
// A type:   double
// cast:     int8_t cij = GB_cast_to_int8_t ((double) (aij))
// unaryop:  cij = aij

#define GB_ATYPE \
    double

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    int8_t z = GB_cast_to_int8_t ((double) (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int8_t z = GB_cast_to_int8_t ((double) (aij)) ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// (0 here: double -> int8_t requires a typecast, so no memcpy fast path)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the (typecasting) identity operator entrywise to the anz values of
// Ax, writing into Cx, using nthreads OpenMP threads.  Returns GrB_NO_VALUE
// when the operator is compile-time disabled, GrB_SUCCESS otherwise.
GrB_Info GB_unop_apply__identity_int8_fp64
(
    int8_t *Cx,                     // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every one of the anz entries is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            int8_t z = GB_cast_to_int8_t ((double) (aij)) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        // (entries with Ab [p] == 0 are absent and skipped)
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            int8_t z = GB_cast_to_int8_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is shared by all unary ops and is textually
// included from GB_unop_transpose.c, driven by the GB_* macros above.
GrB_Info GB_unop_tran__identity_int8_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
interpolation_pl.c
//------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <math.h>
//------------------------------------------------------------------------------------------------------------------------------
// Piecewise-linear interpolation of one coarse block into one fine block:
//   fine = prescale_f * fine + PL-interpolation(coarse).
// The read side may be a coarse-level box or a packed MPI buffer; the write
// side may be a fine-level box or a packed MPI buffer (box index < 0 means
// "use the raw ptr", i.e. an MPI buffer).
static inline void interpolation_pl_block(level_type *level_f, int id_f, double prescale_f, level_type *level_c, int id_c, blockCopy_type *block){
  // interpolate 3D array from read_i,j,k of read[] to write_i,j,k in write[]
  int write_dim_i   = block->dim.i<<1; // calculate the dimensions of the resultant fine block
  int write_dim_j   = block->dim.j<<1;
  int write_dim_k   = block->dim.k<<1;

  int  read_i       = block->read.i;
  int  read_j       = block->read.j;
  int  read_k       = block->read.k;
  int  read_jStride = block->read.jStride;
  int  read_kStride = block->read.kStride;

  int write_i       = block->write.i;
  int write_j       = block->write.j;
  int write_k       = block->write.k;
  int write_jStride = block->write.jStride;
  int write_kStride = block->write.kStride;

  double * __restrict__  read = block->read.ptr;
  double * __restrict__ write = block->write.ptr;
  if(block->read.box >=0){
    // reading from a coarse box: offset past the ghost zone and use the box's strides
    read = level_c->my_boxes[ block->read.box].vectors[ id_c] + level_c->my_boxes[ block->read.box].ghosts*(1+level_c->my_boxes[ block->read.box].jStride+level_c->my_boxes[ block->read.box].kStride);
    read_jStride = level_c->my_boxes[block->read.box ].jStride;
    read_kStride = level_c->my_boxes[block->read.box ].kStride;
  }
  if(block->write.box>=0){
    // writing into a fine box: same ghost-zone offset on the fine level
    write = level_f->my_boxes[block->write.box].vectors[id_f] + level_f->my_boxes[block->write.box].ghosts*(1+level_f->my_boxes[block->write.box].jStride+level_f->my_boxes[block->write.box].kStride);
    write_jStride = level_f->my_boxes[block->write.box].jStride;
    write_kStride = level_f->my_boxes[block->write.box].kStride;
  }

  int i,j,k;
  for(k=0;k<write_dim_k;k++){int delta_k=-read_kStride;if(k&0x1)delta_k=read_kStride;
  for(j=0;j<write_dim_j;j++){int delta_j=-read_jStride;if(j&0x1)delta_j=read_jStride;
  for(i=0;i<write_dim_i;i++){int delta_i=           -1;if(i&0x1)delta_i=           1; // i.e. even points look backwards while odd points look forward
    int write_ijk = ((i   )+write_i) + (((j   )+write_j)*write_jStride) + (((k   )+write_k)*write_kStride);
    int  read_ijk = ((i>>1)+ read_i) + (((j>>1)+ read_j)* read_jStride) + (((k>>1)+ read_k)* read_kStride);
    //
    // | o   |   o |
    // +---+---+---+---+
    // |   | x | x |   |
    //
    // CAREFUL !!!  you must guarantee you zero'd the MPI buffers(write[]) and destination boxes at some point to avoid 0.0*NaN or 0.0*inf
    // piecewise linear interpolation...  NOTE, BC's must have been previously applied
    // weights are (3/4,1/4) per axis, i.e. 27/64, 9/64, 3/64, 1/64 over the 8 nearest coarse cells
    write[write_ijk] = prescale_f*write[write_ijk] +
                       0.421875*read[read_ijk                        ] +
                       0.140625*read[read_ijk                +delta_k] +
                       0.140625*read[read_ijk        +delta_j        ] +
                       0.046875*read[read_ijk        +delta_j+delta_k] +
                       0.140625*read[read_ijk+delta_i                ] +
                       0.046875*read[read_ijk+delta_i        +delta_k] +
                       0.046875*read[read_ijk+delta_i+delta_j        ] +
                       0.015625*read[read_ijk+delta_i+delta_j+delta_k];
  }}}

}

//------------------------------------------------------------------------------------------------------------------------------
// perform a (inter-level) piecewise linear interpolation
// Drives the full distributed interpolation: prepost Irecv's, pack and Isend
// coarse blocks bound for other ranks, do the rank-local interpolation to
// overlap communication, then wait and unpack.  Per-phase cycle counters are
// accumulated on level_f.
void interpolation_pl(level_type * level_f, int id_f, double prescale_f, level_type *level_c, int id_c){
  exchange_boundary(level_c,id_c,0);   // coarse ghost zones must be current before interpolating
  apply_BCs_linear(level_c,id_c,0);    // ...and the linear BCs applied (see NOTE in the block kernel)

  uint64_t _timeCommunicationStart = CycleTime();
  uint64_t _timeStart,_timeEnd;

  int buffer=0;
  int n;
  int my_tag = (level_f->tag<<4) | 0x7;

  #ifdef USE_MPI
  // by convention, level_f allocates a combined array of requests for both level_f recvs and level_c sends...
  int nMessages = level_c->interpolation.num_sends + level_f->interpolation.num_recvs;
  MPI_Request *recv_requests = level_f->interpolation.requests;
  MPI_Request *send_requests = level_f->interpolation.requests + level_f->interpolation.num_recvs;

  // loop through packed list of MPI receives and prepost Irecv's...
  _timeStart = CycleTime();
  #ifdef USE_MPI_THREAD_MULTIPLE
  #pragma omp parallel for schedule(dynamic,1)
  #endif
  for(n=0;n<level_f->interpolation.num_recvs;n++){
    MPI_Irecv(level_f->interpolation.recv_buffers[n],
              level_f->interpolation.recv_sizes[n],
              MPI_DOUBLE,
              level_f->interpolation.recv_ranks[n],
              my_tag,
              MPI_COMM_WORLD,
              &recv_requests[n]
    );
  }
  _timeEnd = CycleTime();
  level_f->cycles.interpolation_recv += (_timeEnd-_timeStart);

  // pack MPI send buffers...
  _timeStart = CycleTime();
  PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_c->interpolation.num_blocks[0])
  for(buffer=0;buffer<level_c->interpolation.num_blocks[0];buffer++){
    // !!! prescale==0 because you don't want to increment the MPI buffer
    interpolation_pl_block(level_f,id_f,0.0,level_c,id_c,&level_c->interpolation.blocks[0][buffer]);
  }
  _timeEnd = CycleTime();
  level_f->cycles.interpolation_pack += (_timeEnd-_timeStart);

  // loop through MPI send buffers and post Isend's...
  _timeStart = CycleTime();
  #ifdef USE_MPI_THREAD_MULTIPLE
  #pragma omp parallel for schedule(dynamic,1)
  #endif
  for(n=0;n<level_c->interpolation.num_sends;n++){
    MPI_Isend(level_c->interpolation.send_buffers[n],
              level_c->interpolation.send_sizes[n],
              MPI_DOUBLE,
              level_c->interpolation.send_ranks[n],
              my_tag,
              MPI_COMM_WORLD,
              &send_requests[n]
    );
  }
  _timeEnd = CycleTime();
  level_f->cycles.interpolation_send += (_timeEnd-_timeStart);
  #endif

  // perform local interpolation... try and hide within Isend latency...
  _timeStart = CycleTime();
  PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_c->interpolation.num_blocks[1])
  for(buffer=0;buffer<level_c->interpolation.num_blocks[1];buffer++){
    interpolation_pl_block(level_f,id_f,prescale_f,level_c,id_c,&level_c->interpolation.blocks[1][buffer]);
  }
  _timeEnd = CycleTime();
  level_f->cycles.interpolation_local += (_timeEnd-_timeStart);

  // wait for MPI to finish...
  #ifdef USE_MPI
  _timeStart = CycleTime();
  if(nMessages)MPI_Waitall(nMessages,level_f->interpolation.requests,level_f->interpolation.status);
  _timeEnd = CycleTime();
  level_f->cycles.interpolation_wait += (_timeEnd-_timeStart);

  // unpack MPI receive buffers
  _timeStart = CycleTime();
  PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_f->interpolation.num_blocks[2])
  for(buffer=0;buffer<level_f->interpolation.num_blocks[2];buffer++){
    IncrementBlock(level_f,id_f,prescale_f,&level_f->interpolation.blocks[2][buffer]);
  }
  _timeEnd = CycleTime();
  level_f->cycles.interpolation_unpack += (_timeEnd-_timeStart);
  #endif

  level_f->cycles.interpolation_total += (uint64_t)(CycleTime()-_timeCommunicationStart);
}
Vec.h
#ifndef VEC_H
#define VEC_H
/*
Szymon Rusinkiewicz
Princeton University

Vec.h
Class for a constant-length vector

Supports the following operations:

	vec v1;			// Initialized to (0, 0, 0)
	vec v2(1.23f);		// Initialized to (1.23f, 1.23f, 1.23f)
	vec v3(1, 2, 3);	// Initialized to (1, 2, 3)
	vec v4(v3);		// Copy constructor

	float farray[3];
	vec v5 = vec(farray);	// Explicit: "v4 = farray" won't work

	Vec<3,double> vd;	// The "vec" used above is Vec<3,float>
	point p1, p2, p3;	// Same as vec

	v3 = v1 + v2;		// Also -, *, / (all componentwise)
	v3 = 3.5f * v1;		// Also vec * scalar, vec / scalar
				// NOTE: scalar has to be the same type:
				// it won't work to do double * vec<float>
	v1 = min(v2, v3);	// Componentwise min/max
	v1 = sin(v2);		// Componentwise - all the usual functions...
	swap(v1, v2);		// In-place swap

	v3 = v1 DOT v2;		// Actually operator^
	v3 = v1 CROSS v2;	// Actually operator%

	float f = v1[0];	// Subscript
	float *fp = v1;		// Implicit conversion to float *

	f = len(v1);		// Length (also len2 == squared length)
	f = dist(p1, p2);	// Distance (also dist2 == squared distance)
	normalize(v1);		// Normalize (i.e., make it unit length)
				// normalize(vec(0,0,0)) => vec(1,0,0)
	v1 = trinorm(p1,p2,p3); // Normal of triangle (area-weighted)

	cout << v1 << endl;	// iostream output in the form (1,2,3)
	cin >> v2;		// iostream input using the same syntax

Also defines the utility functions sqr, cube, sgn, fract, clamp, mix,
step, smoothstep, faceforward, reflect, refract, and angle
*/

// Windows defines min and max as macros, which prevents us from using the
// type-safe versions from std::, as well as interfering with method defns.
// Also define NOMINMAX, which prevents future bad definitions.
#ifdef min
# undef min
#endif
#ifdef max
# undef max
#endif
#ifndef NOMINMAX
# define NOMINMAX
#endif
#ifndef _USE_MATH_DEFINES
#define _USE_MATH_DEFINES
#endif

#include <cstddef>
#include <cmath>
#include <iterator>
#include <stdexcept>
#include <iostream>
#include <algorithm>

// Let gcc optimize conditional branches a bit better...
#ifndef likely
# if !defined(__GNUC__) || (__GNUC__ == 2 && __GNUC_MINOR__ < 96)
#  define likely(x) (x)
#  define unlikely(x) (x)
# else
#  define likely(x) (__builtin_expect((x), 1))
#  define unlikely(x) (__builtin_expect((x), 0))
# endif
#endif

namespace trimesh {

using ::std::size_t;

// Boost-like compile-time assertion checking: only the <true>
// specialization is defined, so a false condition fails to compile.
template <bool X> struct VEC_STATIC_ASSERTION_FAILURE;
template <> struct VEC_STATIC_ASSERTION_FAILURE<true>
	{ void operator () () {} };
#define VEC_STATIC_CHECK(expr) VEC_STATIC_ASSERTION_FAILURE<bool(expr)>()

// Vec class declaration
template <size_t D, class T = float>
class Vec {
public:
	// Types
	typedef T value_type;
	typedef value_type *pointer;
	typedef const value_type *const_pointer;
	typedef value_type &reference;
	typedef const value_type &const_reference;
	typedef value_type *iterator;
	typedef const value_type *const_iterator;
	typedef ::std::reverse_iterator<iterator> reverse_iterator;
	typedef ::std::reverse_iterator<const_iterator> const_reverse_iterator;
	typedef ::std::size_t size_type;
	typedef ::std::ptrdiff_t difference_type;

protected:
	// The internal representation: standard array
	T v[D];

public:
	// Constructor for no arguments.  Everything initialized to 0.
	Vec() { for (size_type i = 0; i < D; i++) v[i] = T(0); }

	// Uninitialized constructor - meant mostly for internal use
	// (passing VEC_UNINITIALIZED selects this overload and skips zeroing)
#define VEC_UNINITIALIZED ((void *) 0)
	Vec(void *) {}

	// Constructor for one argument - default value.  Explicit.
	explicit Vec(const T &x)
		{ for (size_type i = 0; i < D; i++) v[i] = x; }

	// Constructors for 2-4 arguments
	Vec(const T &x, const T &y)
		{ VEC_STATIC_CHECK(D == 2); v[0] = x; v[1] = y; }
	Vec(const T &x, const T &y, const T &z)
		{ VEC_STATIC_CHECK(D == 3); v[0] = x; v[1] = y; v[2] = z; }
	Vec(const T &x, const T &y, const T &z, const T &w)
		{ VEC_STATIC_CHECK(D == 4); v[0] = x; v[1] = y; v[2] = z; v[3] = w; }

	// Constructor from anything that can be accessed using []
	// Pretty aggressive, so marked as explicit.
	template <class S> explicit Vec(const S &x)
		{ for (size_type i = 0; i < D; i++) v[i] = x[i]; }

	// Using default copy constructor, assignment operator, and destructor

	// Array reference - no bounds checking
	reference operator [] (size_type i)
		{ return v[i]; }
	reference operator [] (int i)
		{ return v[i]; }
	const_reference operator [] (size_type i) const
		{ return v[i]; }
	const_reference operator [] (int i) const
		{ return v[i]; }

	// Array reference with bounds checking
	reference at(size_type i)
	{
		if (i >= D)
			throw ::std::out_of_range("Vec::at");
		return v[i];
	}
	const_reference at(size_type i) const
	{
		if (i >= D)
			throw ::std::out_of_range("Vec::at");
		return v[i];
	}

	// Other accessors, for compatibility with std::array
	reference front()
		{ return v[0]; }
	const_reference front() const
		{ return v[0]; }
	reference back()
		{ return v[D-1]; }
	const_reference back() const
		{ return v[D-1]; }

	// Conversion to pointer
	// NOTE(review): there are two non-const overloads converting to
	// "const T *" and "T *" plus a const overload - intentional per the
	// header's "Implicit conversion to float *" contract.
	operator T * ()
		{ return v; }
	operator const T * ()
		{ return v; }
	operator const T * () const
		{ return v; }
	pointer data()
		{ return v; }
	const_pointer data() const
		{ return v; }

	// Iterators
	iterator begin()
		{ return v; }
	const_iterator begin() const
		{ return v; }
	const_iterator cbegin() const
		{ return v; }
	iterator end()
		{ return begin() + D; }
	const_iterator end() const
		{ return begin() + D; }
	const_iterator cend() const
		{ return begin() + D; }
	reverse_iterator rbegin()
		{ return reverse_iterator(end()); }
	const_reverse_iterator rbegin() const
		{ return const_reverse_iterator(end()); }
	const_reverse_iterator crbegin() const
		{ return const_reverse_iterator(end()); }
	reverse_iterator rend()
		{ return reverse_iterator(begin()); }
	const_reverse_iterator rend() const
		{ return const_reverse_iterator(begin()); }
	const_reverse_iterator crend() const
		{ return const_reverse_iterator(begin()); }

	// Capacity
	size_type size() const
		{ return D; }
	size_type max_size() const
		{ return D; }

	// empty() and clear() - check for all zero or set to zero
	// (note: NOT the std::container meaning of empty())
	bool empty() const
	{
		for (size_type i = 0; i < D; i++)
			if (v[i]) return false;
		return true;
	}
	void clear()
		{ for (size_type i = 0; i < D; i++) v[i] = T(0); }

	// Set all elements to some constant
	void fill(const value_type &x)
		{ for (size_type i = 0; i < D; i++) v[i] = x; }
	Vec<D,T> &operator = (const value_type &x)
	{
		for (size_type i = 0; i < D; i++) v[i] = x;
		return *this;
	}

	// Member operators
	// Each componentwise update is marked "omp atomic" so concurrent
	// accumulation into a shared Vec is safe inside OpenMP regions.
	Vec<D,T> &operator += (const Vec<D,T> &x)
	{
		for (size_type i = 0; i < D; i++)
#pragma omp atomic
			v[i] += x[i];
		return *this;
	}
	Vec<D,T> &operator -= (const Vec<D,T> &x)
	{
		for (size_type i = 0; i < D; i++)
#pragma omp atomic
			v[i] -= x[i];
		return *this;
	}
	Vec<D,T> &operator *= (const Vec<D,T> &x)
	{
		for (size_type i = 0; i < D; i++)
#pragma omp atomic
			v[i] *= x[i];
		return *this;
	}
	Vec<D,T> &operator *= (const T &x)
	{
		for (size_type i = 0; i < D; i++)
#pragma omp atomic
			v[i] *= x;
		return *this;
	}
	Vec<D,T> &operator /= (const Vec<D,T> &x)
	{
		for (size_type i = 0; i < D; i++)
#pragma omp atomic
			v[i] /= x[i];
		return *this;
	}
	Vec<D,T> &operator /= (const T &x)
	{
		for (size_type i = 0; i < D; i++)
#pragma omp atomic
			v[i] /= x;
		return *this;
	}

	// Set each component to min/max of this and the other vector
	Vec<D,T> &min(const Vec<D,T> &x)
	{
#pragma omp critical
		for (size_type i = 0; i < D; i++)
			if (x[i] < v[i]) v[i] = x[i];
		return *this;
	}
	Vec<D,T> &max(const Vec<D,T> &x)
	{
#pragma omp critical
		for (size_type i = 0; i < D; i++)
			if (x[i] > v[i]) v[i] = x[i];
		return *this;
	}

	// Swap with another vector.  (Also exists as a global function.)
	void swap(Vec<D,T> &x)
	{
		using namespace ::std;
#pragma omp critical
		for (size_type i = 0; i < D; i++) swap(v[i], x[i]);
	}

	// Outside of class: + - * / % ^ << >> == != < > <= >=

	// Dot product with another vector (also exists as an operator)
	value_type dot(const Vec<D,T> &x) const
	{
		value_type total = v[0] * x[0];
		for (size_type i = 1; i < D; i++)
			total += v[i] * x[i];
		return total;
	}

	// Cross product with another vector (also exists as an operator)
	Vec<3,T> cross(const Vec<3,T> &x) const
	{
		VEC_STATIC_CHECK(D == 3);
		return Vec<3,T>(v[1]*x[2] - v[2]*x[1],
		                v[2]*x[0] - v[0]*x[2],
		                v[0]*x[1] - v[1]*x[0]);
	}

	// Some partial compatibility with std::valarray, plus generalizations
	value_type sum() const
	{
		value_type total = v[0];
		for (size_type i = 1; i < D; i++)
			total += v[i];
		return total;
	}
	value_type sumabs() const
	{
		using namespace ::std;
		value_type total = fabs(v[0]);
		for (size_type i = 1; i < D; i++)
			total += fabs(v[i]);
		return total;
	}
	value_type avg() const
		{ return sum() / D; }
	value_type mean() const
		{ return sum() / D; }
	value_type product() const
	{
		value_type total = v[0];
		for (size_type i = 1; i < D; i++)
			total *= v[i];
		return total;
	}
	value_type min() const
	{
		value_type m = v[0];
		for (size_type i = 1; i < D; i++)
			if (v[i] < m) m = v[i];
		return m;
	}
	value_type max() const
	{
		value_type m = v[0];
		for (size_type i = 1; i < D; i++)
			if (v[i] > m) m = v[i];
		return m;
	}
	// Componentwise application of a unary function
	Vec<D,T> apply(value_type func(value_type)) const
	{
		Vec<D,T> result(VEC_UNINITIALIZED);
		for (size_type i = 0; i < D; i++)
			result[i] = func(v[i]);
		return result;
	}
	Vec<D,T> apply(value_type func(const value_type&)) const
	{
		Vec<D,T> result(VEC_UNINITIALIZED);
		for (size_type i = 0; i < D; i++)
			result[i] = func(v[i]);
		return result;
	}
	// Circular shift of components by n positions
	Vec<D,T> cshift(int n) const
	{
		Vec<D,T> result(VEC_UNINITIALIZED);
		if (n < 0)
			n = (n % D) + D;
		for (size_type i = 0; i < D; i++)
			result[i] = v[(i+n)%D];
		return result;
	}
	// Non-circular shift: vacated components are zero
	Vec<D,T> shift(int n) const
	{
		using namespace ::std;
		if (abs(n) >= D)
			return Vec<D,T>();
		Vec<D,T> result; // Must be initialized to zero
		size_type start = n < 0 ? -n : 0;
		size_type stop = n > 0 ? D - n : D;
		for (size_type i = start; i < stop; i++)
			result[i] = v[i+n];
		return result;
	}

	// TODO for C++11: std::get()
}; // class Vec

// Shorthands for particular flavors of Vecs
typedef Vec<3,float> vec;
typedef Vec<3,float> point;
typedef Vec<2,float> vec2;
typedef Vec<3,float> vec3;
typedef Vec<4,float> vec4;
typedef Vec<2,int> ivec2;
typedef Vec<3,int> ivec3;
typedef Vec<4,int> ivec4;

// Nonmember operators that take two Vecs
template <size_t D, class T>
static inline const Vec<D,T> operator + (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	using namespace ::std;
	Vec<D,T> result(VEC_UNINITIALIZED);
	for (size_t i = 0; i < D; i++)
		result[i] = v1[i] + v2[i];
	return result;
}

template <size_t D, class T>
static inline const Vec<D,T> operator - (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	using namespace ::std;
	Vec<D,T> result(VEC_UNINITIALIZED);
	for (size_t i = 0; i < D; i++)
		result[i] = v1[i] - v2[i];
	return result;
}

template <size_t D, class T>
static inline const Vec<D,T> operator * (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	using namespace ::std;
	Vec<D,T> result(VEC_UNINITIALIZED);
	for (size_t i = 0; i < D; i++)
		result[i] = v1[i] * v2[i];
	return result;
}

template <size_t D, class T>
static inline const Vec<D,T> operator / (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	using namespace ::std;
	Vec<D,T> result(VEC_UNINITIALIZED);
	for (size_t i = 0; i < D; i++)
		result[i] = v1[i] / v2[i];
	return result;
}

// Dot product
template <size_t D, class T>
static inline const T operator ^ (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	using namespace ::std;
	T sum = v1[0] * v2[0];
	for (size_t i = 1; i < D; i++)
		sum += v1[i] * v2[i];
	return sum;
}
#define DOT ^

// Cross product - only in 3 dimensions
template <class T>
static inline const Vec<3,T> operator % (const Vec<3,T> &v1, const Vec<3,T> &v2)
{
	return Vec<3,T>(v1[1]*v2[2] - v1[2]*v2[1],
	                v1[2]*v2[0] - v1[0]*v2[2],
	                v1[0]*v2[1] - v1[1]*v2[0]);
}
#define CROSS %

// Component-wise equality and inequality (#include the usual caveats
// about comparing floats for equality...)
template <size_t D, class T>
static inline bool operator == (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	using namespace ::std;
	for (size_t i = 0; i < D; i++)
		if (v1[i] != v2[i])
			return false;
	return true;
}

template <size_t D, class T>
static inline bool operator != (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	using namespace ::std;
	for (size_t i = 0; i < D; i++)
		if (v1[i] != v2[i])
			return true;
	return false;
}

// Comparison by lexicographical ordering - not necessarily useful on its own,
// but necessary in order to put Vecs in sets, maps, etc.
template <size_t D, class T>
static inline bool operator < (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	using namespace ::std;
	for (size_t i = 0; i < D; i++) {
		if (v1[i] < v2[i])
			return true;
		else if (v1[i] > v2[i])
			return false;
	}
	return false;
}

template <size_t D, class T>
static inline bool operator > (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	return v2 < v1;
}

template <size_t D, class T>
static inline bool operator <= (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	return !(v2 < v1);
}

template <size_t D, class T>
static inline bool operator >= (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	return !(v1 < v2);
}

// Unary operators
template <size_t D, class T>
static inline const Vec<D,T> &operator + (const Vec<D,T> &v)
{
	return v;
}

template <size_t D, class T>
static inline const Vec<D,T> operator - (const Vec<D,T> &v)
{
	using namespace ::std;
	Vec<D,T> result(VEC_UNINITIALIZED);
	for (size_t i = 0; i < D; i++)
		result[i] = -v[i];
	return result;
}

// operator! tests for the all-zero vector (delegates to Vec::empty())
template <size_t D, class T>
static inline bool operator ! (const Vec<D,T> &v)
{
	return v.empty();
}

// Vec/scalar operators
template <size_t D, class T>
static inline const Vec<D,T> operator * (const T &x, const Vec<D,T> &v)
{
	using namespace ::std;
	Vec<D,T> result(VEC_UNINITIALIZED);
	for (size_t i = 0; i < D; i++)
		result[i] = x * v[i];
	return result;
}

template <size_t D, class T>
static inline const Vec<D,T> operator * (const Vec<D,T> &v, const T &x)
{
	using namespace ::std;
	Vec<D,T> result(VEC_UNINITIALIZED);
	for (size_t i = 0; i < D; i++)
		result[i] = v[i] * x;
	return result;
}

template <size_t D, class T>
static inline const Vec<D,T> operator / (const T &x, const Vec<D,T> &v)
{
	using namespace ::std;
	Vec<D,T> result(VEC_UNINITIALIZED);
	for (size_t i = 0; i < D; i++)
		result[i] = x / v[i];
	return result;
}

template <size_t D, class T>
static inline const Vec<D,T> operator / (const Vec<D,T> &v, const T &x)
{
	using namespace ::std;
	Vec<D,T> result(VEC_UNINITIALIZED);
	for (size_t i = 0; i < D; i++)
		result[i] = v[i] / x;
	return result;
}

// iostream operators
template <size_t D, class T>
static inline ::std::ostream &operator << (::std::ostream &os, const Vec<D,T> &v)
{
	using namespace ::std;
	os << "(";
	for (size_t i = 0; i < D-1; i++)
		os << v[i] << ", ";
	return os << v[D-1] << ")";
}

template <size_t D, class T>
static inline ::std::istream &operator >> (::std::istream &is, Vec<D,T> &v)
{
	using namespace ::std;
	char c1 = 0, c2 = 0;

	// Accepts "(a, b, c)" or "[a, b, c]"; sets failbit on mismatch.
	is >> c1;
	if (c1 == '(' || c1 == '[') {
		is >> v[0] >> ws >> c2;
		for (size_t i = 1; i < D; i++) {
			if (c2 == ',')
				is >> v[i] >> ws >> c2;
			else
				is.setstate(ios::failbit);
		}
	}

	if (c1 == '(' && c2 != ')')
		is.setstate(ios::failbit);
	else if (c1 == '[' && c2 != ']')
		is.setstate(ios::failbit);

	return is;
}

// Utility functions for square and cube, to go along with sqrt and cbrt
template <class T>
static inline T sqr(const T &x)
{
	return x*x;
}

template <class T>
static inline T cube(const T &x)
{
	return x*x*x;
}

// Sign of a scalar.  Note that sgn(0) == 1.
template <class T>
static inline T sgn(const T &x)
{
	return (x < T(0)) ? T(-1) : T(1);
}

// Utility functions based on GLSL
template <class T>
static inline T fract(const T &x)
{
	return x - floor(x);
}

template <class T>
static inline T clamp(const T &x, const T &a, const T &b)
{
	return x > a ? x < b ? x : b : a;	// returns a on NaN
}

template <class T, class S>
static inline T mix(const T &x, const T &y, const S &a)
{
	return (S(1)-a) * x + a * y;
}

template <class T>
static inline T step(const T &a, const T &x)
{
	return x < a ? T(0) : T(1);
}

template <class T>
static inline T smoothstep(const T &a, const T &b, const T &x)
{
	if (b <= a) return step(x,a);
	T t = (x - a) / (b - a);
	return t <= T(0) ? T(0) : t >= T(1) ? T(1) :
		t * t * (T(3) - T(2) * t);
}

// NOTE(review): faceforward/reflect/refract declare return type T but return
// Vec-valued expressions; as written they only instantiate if T itself is a
// vector type (i.e. called with explicit template args) - verify intent.
template <size_t D, class T>
static inline T faceforward(const Vec<D,T> &N, const Vec<D,T> &I,
                            const Vec<D,T> &Nref)
{
	return ((Nref DOT I) < T(0)) ? N : -N;
}

template <size_t D, class T>
static inline T reflect(const Vec<D,T> &I, const Vec<D,T> &N)
{
	return I - (T(2) * (N DOT I)) * N;
}

template <size_t D, class T>
static inline T refract(const Vec<D,T> &I, const Vec<D,T> &N,
                        const T &eta)
{
	using namespace ::std;
	T NdotI = N DOT I;
	T k = T(1) - sqr(eta) * (T(1) - sqr(NdotI));
	// NOTE(review): GLSL refract uses (eta * NdotI + sqrt(k)); this code
	// multiplies instead - confirm against upstream before relying on it.
	return (k < T(0)) ? T(0) :
		eta * I - (eta * NdotI * sqrt(k)) * N;
}

// Squared length
template <size_t D, class T>
static inline const T len2(const Vec<D,T> &v)
{
	using namespace ::std;
	T l2 = v[0] * v[0];
	for (size_t i = 1; i < D; i++)
		l2 += v[i] * v[i];
	return l2;
}

// Length
template <size_t D, class T>
static inline const T len(const Vec<D,T> &v)
{
	using namespace ::std;
	return sqrt(len2(v));
}

// Squared distance
template <size_t D, class T>
static inline const T dist2(const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	using namespace ::std;
	T d2 = sqr(v2[0]-v1[0]);
	for (size_t i = 1; i < D; i++)
		d2 += sqr(v2[i]-v1[i]);
	return d2;
}

// Distance
template <size_t D, class T>
static inline const T dist(const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	using namespace ::std;
	return sqrt(dist2(v1,v2));
}

// In-place normalization to unit length
// (a zero-length input becomes the unit x axis, per the header comment)
template <size_t D, class T>
static inline Vec<D,T> normalize(Vec<D,T> &v)
{
	using namespace ::std;
	T l = len(v);
	if (unlikely(l <= T(0))) {
		v[0] = T(1);
		for (size_t i = 1; i < D; i++)
			v[i] = T(0);
		return v;
	}

	l = T(1) / l;
	for (size_t i = 0; i < D; i++)
		v[i] *= l;

	return v;
}

// Area-weighted triangle face normal
template <class T>
static inline T trinorm(const T &v0, const T &v1, const T &v2)
{
	return (typename T::value_type) 0.5 * ((v1 - v0) CROSS (v2 - v0));
}

// Angle between two vectors
template <size_t D, class T>
static inline const T angle(const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	using namespace ::std;
	return atan2(len(v1 CROSS v2), v1 DOT v2);
}

}; // namespace trimesh

// POSIX / C99 compatibility functions for MSVS
#ifdef _WIN32
#ifdef cbrt
# undef cbrt
#endif
inline float cbrt(float x)
{
	using namespace ::std;
	return (x < 0.0f) ? -pow(-x, 1.0f / 3.0f) : pow(x, 1.0f / 3.0f);
}
inline double cbrt(double x)
{
	using namespace ::std;
	return (x < 0.0) ? -pow(-x, 1.0 / 3.0) : pow(x, 1.0 / 3.0);
}
inline long double cbrt(long double x)
{
	using namespace ::std;
	return (x < 0.0L) ?
-pow(-x, 1.0L / 3.0L) : pow(x, 1.0L / 3.0L); } #ifdef round # undef round #endif inline float round(float x) { return (x < 0.0f) ? float(int(x - 0.5f)) : float(int(x + 0.5f)); } inline double round(double x) { return (x < 0.0f) ? double(int(x - 0.5)) : double(int(x + 0.5)); } inline long double round(long double x) { return (x < 0.0f) ? (long double)(int(x - 0.5L)) : (long double)(int(x + 0.5L)); } #ifdef trunc # undef trunc #endif inline float trunc(float x) { return (x < 0.0f) ? float(int(x)) : float(int(x)); } inline double trunc(double x) { return (x < 0.0f) ? double(int(x)) : double(int(x)); } inline long double trunc(long double x) { return (x < 0.0f) ? (long double)(int(x)) : (long double)(int(x)); } #endif // _WIN32 // Generic macros for declaring 1-, 2-, and 3- argument // componentwise functions on Vecs. #define VEC_DECLARE_ONEARG(name) \ template < ::std::size_t D, class T > \ static inline trimesh::Vec<D,T> name(const trimesh::Vec<D,T> &v) \ { \ using namespace ::std; \ using namespace ::trimesh; \ Vec<D,T> result(VEC_UNINITIALIZED); \ for (size_t i = 0; i < D; i++) \ result[i] = name(v[i]); \ return result; \ } // Vector-scalar, scalar-vector, and componentwise vector-vector versions #define VEC_DECLARE_TWOARG_VS(name) \ template < ::std::size_t D, class T > \ static inline trimesh::Vec<D,T> name(const trimesh::Vec<D,T> &v, const T &a) \ { \ using namespace ::std; \ using namespace ::trimesh; \ Vec<D,T> result(VEC_UNINITIALIZED); \ for (size_t i = 0; i < D; i++) \ result[i] = name(v[i], a); \ return result; \ } #define VEC_DECLARE_TWOARG_SV(name) \ template < ::std::size_t D, class T > \ static inline trimesh::Vec<D,T> name(const T &a, const trimesh::Vec<D,T> &v) \ { \ using namespace ::std; \ using namespace ::trimesh; \ Vec<D,T> result(VEC_UNINITIALIZED); \ for (size_t i = 0; i < D; i++) \ result[i] = name(a, v[i]); \ return result; \ } #define VEC_DECLARE_TWOARG_VV(name) \ template < ::std::size_t D, class T > \ static inline trimesh::Vec<D,T> 
name(const trimesh::Vec<D,T> &v, const trimesh::Vec<D,T> &w) \ { \ using namespace ::std; \ using namespace ::trimesh; \ Vec<D,T> result(VEC_UNINITIALIZED); \ for (size_t i = 0; i < D; i++) \ result[i] = name(v[i], w[i]); \ return result; \ } #define VEC_DECLARE_THREEARG_VSS(name) \ template < ::std::size_t D, class T > \ static inline trimesh::Vec<D,T> name(const trimesh::Vec<D,T> &v, const T &a, const T &b) \ { \ using namespace ::std; \ using namespace ::trimesh; \ Vec<D,T> result(VEC_UNINITIALIZED); \ for (size_t i = 0; i < D; i++) \ result[i] = name(v[i], a, b); \ return result; \ } #define VEC_DECLARE_THREEARG_SSV(name) \ template < ::std::size_t D, class T > \ static inline trimesh::Vec<D,T> name(const T &a, const T &b, const trimesh::Vec<D,T> &v) \ { \ using namespace ::std; \ using namespace ::trimesh; \ Vec<D,T> result(VEC_UNINITIALIZED); \ for (size_t i = 0; i < D; i++) \ result[i] = name(a, b, v[i]); \ return result; \ } #define VEC_DECLARE_THREEARG_VVV(name) \ template < ::std::size_t D, class T > \ static inline trimesh::Vec<D,T> name(const trimesh::Vec<D,T> &v, const trimesh::Vec<D,T> &w, const trimesh::Vec<D,T> &x) \ { \ using namespace ::std; \ using namespace ::trimesh; \ Vec<D,T> result(VEC_UNINITIALIZED); \ for (size_t i = 0; i < D; i++) \ result[i] = name(v[i], w[i], x[i]); \ return result; \ } // The following is the list of functions in C89 and C++98, with the exception // of frexp, ldexp, and modf (which have irregular calling conventions). // They are supposed to be in namespace std, but Visual Studio and some // older compilers also declare them in the global namespace. // In the name of compatibility, we (reluctantly) do likewise. 
VEC_DECLARE_ONEARG(acos) VEC_DECLARE_ONEARG(asin) VEC_DECLARE_ONEARG(atan) VEC_DECLARE_TWOARG_VV(atan2) VEC_DECLARE_ONEARG(ceil) VEC_DECLARE_ONEARG(cos) VEC_DECLARE_ONEARG(cosh) VEC_DECLARE_ONEARG(exp) VEC_DECLARE_ONEARG(fabs) VEC_DECLARE_ONEARG(floor) VEC_DECLARE_TWOARG_VS(fmod) VEC_DECLARE_TWOARG_VV(fmod) VEC_DECLARE_ONEARG(log) VEC_DECLARE_ONEARG(log10) VEC_DECLARE_TWOARG_VS(pow) VEC_DECLARE_TWOARG_SV(pow) VEC_DECLARE_TWOARG_VV(pow) VEC_DECLARE_ONEARG(sin) VEC_DECLARE_ONEARG(sinh) VEC_DECLARE_ONEARG(sqrt) VEC_DECLARE_ONEARG(tan) VEC_DECLARE_ONEARG(tanh) namespace std { using ::acos; using ::asin; using ::atan; using ::atan2; using ::ceil; using ::cos; using ::cosh; using ::exp; using ::fabs; using ::floor; using ::fmod; using ::log; using ::log10; using ::pow; using ::sin; using ::sinh; using ::sqrt; using ::tan; using ::tanh; }; // These are only in namespace std. namespace std { VEC_DECLARE_TWOARG_VS(min) VEC_DECLARE_TWOARG_SV(min) VEC_DECLARE_TWOARG_VV(min) VEC_DECLARE_TWOARG_VS(max) VEC_DECLARE_TWOARG_SV(max) VEC_DECLARE_TWOARG_VV(max) // Swap two Vecs. Not atomic, unlike class method. template <size_t D, class T> static inline void swap(const ::trimesh::Vec<D,T> &v1, const ::trimesh::Vec<D,T> &v2) { for (size_t i = 0; i < D; i++) swap(v1[i], v2[i]); } }; // These are POSIX and are commonly used. Global namespace. // Compatibility versions of these for MSVC are above. VEC_DECLARE_ONEARG(cbrt) VEC_DECLARE_ONEARG(round) VEC_DECLARE_ONEARG(trunc) // These are new functions declared in namespace trimesh. 
namespace trimesh { VEC_DECLARE_ONEARG(sqr) VEC_DECLARE_ONEARG(cube) VEC_DECLARE_ONEARG(sgn) VEC_DECLARE_ONEARG(fract) VEC_DECLARE_THREEARG_VSS(clamp) VEC_DECLARE_THREEARG_VVV(clamp) VEC_DECLARE_TWOARG_SV(step) VEC_DECLARE_TWOARG_VV(step) VEC_DECLARE_THREEARG_SSV(smoothstep) VEC_DECLARE_THREEARG_VVV(smoothstep) }; #undef VEC_DECLARE_ONEARG #undef VEC_DECLARE_TWOARG_VS #undef VEC_DECLARE_TWOARG_SV #undef VEC_DECLARE_TWOARG_VV #undef VEC_DECLARE_THREEARG_VSS #undef VEC_DECLARE_THREEARG_SSV #undef VEC_DECLARE_THREEARG_VVV // Both valarrays and GLSL use abs() on a vector to mean fabs(). // Let's do the same... template < ::std::size_t D, class T > static inline trimesh::Vec<D,T> abs(const trimesh::Vec<D,T> &v) { return fabs(v); } namespace std { using ::abs; }; #endif
GB_binop__isne_fc32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isne_fc32) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__isne_fc32) // A.*B function (eWiseMult): GB (_AemultB_03__isne_fc32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_fc32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((node)) // C+=B function (dense accum): GB (_Cdense_accumB__isne_fc32) // C+=b function (dense accum): GB (_Cdense_accumb__isne_fc32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_fc32) // C=scalar+B GB (_bind1st__isne_fc32) // C=scalar+B' GB (_bind1st_tran__isne_fc32) // C=A+scalar GB (_bind2nd__isne_fc32) // C=A'+scalar GB (_bind2nd_tran__isne_fc32) // C type: GxB_FC32_t // A type: GxB_FC32_t // B,b type: GxB_FC32_t // BinaryOp: cij = GB_FC32_isne (aij, bij) #define GB_ATYPE \ GxB_FC32_t #define GB_BTYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = 
Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ GxB_FC32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_FC32_isne (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISNE || GxB_NO_FC32 || GxB_NO_ISNE_FC32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__isne_fc32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isne_fc32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isne_fc32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC32_t GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((node)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isne_fc32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__isne_fc32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t 
*restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isne_fc32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__isne_fc32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isne_fc32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isne_fc32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ; GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ; GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; GxB_FC32_t bij = Bx [p] ; Cx [p] = GB_FC32_isne (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isne_fc32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ; GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ; GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC32_t aij = Ax [p] ; Cx [p] = GB_FC32_isne (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC32_t aij = Ax [pA] ; \ Cx [pC] = GB_FC32_isne (x, aij) ; \ } GrB_Info GB (_bind1st_tran__isne_fc32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC32_t aij = Ax [pA] ; \ Cx [pC] = GB_FC32_isne (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__isne_fc32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_unaryop__ainv_int64_bool.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_int64_bool
// op(A') function:  GB_tran__ainv_int64_bool

// C type:   int64_t
// A type:   bool
// cast:     int64_t cij = (int64_t) aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: additive inverse (negation)
#define GB_OP(z, x)   \
    z = -x ;

// casting: the bool input is first cast to the int64_t output type
#define GB_CASTING(z, x)   \
    int64_t z = (int64_t) x ;

// cij = op (cast (aij)): fetch, cast, then apply the operator
#define GB_CAST_OP(pC,pA)  \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_INT64 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise over the anz entries of Ax, parallelized with OpenMP.
GrB_Info GB_unop__ainv_int64_bool
(
    int64_t *restrict Cx,
    const bool *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop itself lives in the shared template
// GB_unaryop_transpose.c, specialized by the macros defined above.
GrB_Info GB_tran__ainv_int64_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
channel.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC H H AAA N N N N EEEEE L % % C H H A A NN N NN N E L % % C HHHHH AAAAA N N N N N N EEE L % % C H H A A N NN N NN E L % % CCCC H H A A N N N N EEEEE LLLLL % % % % % % MagickCore Image Channel Methods % % % % Software Design % % Cristy % % December 2003 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/cache-private.h" #include "MagickCore/channel.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/enhance.h" #include "MagickCore/image.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C h a n n e l F x I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ChannelFxImage() applies a channel expression to the specified image. The % expression consists of one or more channels, either mnemonic or numeric (e.g. % red, 1), separated by actions as follows: % % <=> exchange two channels (e.g. red<=>blue) % => copy one channel to another channel (e.g. red=>green) % = assign a constant value to a channel (e.g. red=50%) % , write new image channels in the specified order (e.g. red, green) % | add a new output image for the next set of channel operations % ; move to the next input image for the source of channel data % % For example, to create 3 grayscale images from the red, green, and blue % channels of an image, use: % % -channel-fx "red; green; blue" % % A channel without an operation symbol implies separate (i.e, semicolon). % % The format of the ChannelFxImage method is: % % Image *ChannelFxImage(const Image *image,const char *expression, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o expression: A channel expression. 
% % o exception: return any errors or warnings in this structure. % */ typedef enum { ExtractChannelOp, AssignChannelOp, ExchangeChannelOp, TransferChannelOp } ChannelFx; static MagickBooleanType ChannelImage(Image *destination_image, const PixelChannel destination_channel,const ChannelFx channel_op, const Image *source_image,const PixelChannel source_channel, const Quantum pixel,ExceptionInfo *exception) { CacheView *source_view, *destination_view; MagickBooleanType status; size_t height, width; ssize_t y; status=MagickTrue; source_view=AcquireVirtualCacheView(source_image,exception); destination_view=AcquireAuthenticCacheView(destination_image,exception); height=MagickMin(source_image->rows,destination_image->rows); width=MagickMin(source_image->columns,destination_image->columns); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(source_image,source_image,height,1) #endif for (y=0; y < (ssize_t) height; y++) { PixelTrait destination_traits, source_traits; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1, exception); q=GetCacheViewAuthenticPixels(destination_view,0,y, destination_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } destination_traits=GetPixelChannelTraits(destination_image, destination_channel); source_traits=GetPixelChannelTraits(source_image,source_channel); if ((destination_traits == UndefinedPixelTrait) || (source_traits == UndefinedPixelTrait)) continue; for (x=0; x < (ssize_t) width; x++) { if (channel_op == AssignChannelOp) SetPixelChannel(destination_image,destination_channel,pixel,q); else SetPixelChannel(destination_image,destination_channel, GetPixelChannel(source_image,source_channel,p),q); p+=GetPixelChannels(source_image); 
q+=GetPixelChannels(destination_image); } if (SyncCacheViewAuthenticPixels(destination_view,exception) == MagickFalse) status=MagickFalse; } destination_view=DestroyCacheView(destination_view); source_view=DestroyCacheView(source_view); return(status); } MagickExport Image *ChannelFxImage(const Image *image,const char *expression, ExceptionInfo *exception) { #define ChannelFxImageTag "ChannelFx/Image" ChannelFx channel_op; ChannelType channel_mask; char token[MagickPathExtent]; const char *p; const Image *source_image; double pixel; Image *destination_image; MagickBooleanType status; PixelChannel source_channel, destination_channel; ssize_t channels; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); source_image=image; destination_image=CloneImage(source_image,0,0,MagickTrue,exception); if (destination_image == (Image *) NULL) return((Image *) NULL); if (expression == (const char *) NULL) return(destination_image); status=SetImageStorageClass(destination_image,DirectClass,exception); if (status == MagickFalse) { destination_image=GetLastImageInList(destination_image); return((Image *) NULL); } destination_channel=RedPixelChannel; channel_mask=UndefinedChannel; pixel=0.0; p=(char *) expression; (void) GetNextToken(p,&p,MagickPathExtent,token); channel_op=ExtractChannelOp; for (channels=0; *token != '\0'; ) { ssize_t i; /* Interpret channel expression. 
*/ switch (*token) { case ',': { (void) GetNextToken(p,&p,MagickPathExtent,token); break; } case '|': { if (GetNextImageInList(source_image) != (Image *) NULL) source_image=GetNextImageInList(source_image); else source_image=GetFirstImageInList(source_image); (void) GetNextToken(p,&p,MagickPathExtent,token); break; } case ';': { Image *canvas; (void) SetPixelChannelMask(destination_image,channel_mask); if ((channel_op == ExtractChannelOp) && (channels == 1)) { (void) SetPixelMetaChannels(destination_image,0,exception); (void) SetImageColorspace(destination_image,GRAYColorspace, exception); } canvas=CloneImage(source_image,0,0,MagickTrue,exception); if (canvas == (Image *) NULL) { destination_image=DestroyImageList(destination_image); return(destination_image); } AppendImageToList(&destination_image,canvas); destination_image=GetLastImageInList(destination_image); status=SetImageStorageClass(destination_image,DirectClass,exception); if (status == MagickFalse) { destination_image=GetLastImageInList(destination_image); return((Image *) NULL); } (void) GetNextToken(p,&p,MagickPathExtent,token); channels=0; destination_channel=RedPixelChannel; channel_mask=UndefinedChannel; break; } default: break; } i=ParsePixelChannelOption(token); if (i < 0) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnrecognizedChannelType","`%s'",token); destination_image=DestroyImageList(destination_image); return(destination_image); } source_channel=(PixelChannel) i; channel_op=ExtractChannelOp; (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == '<') { channel_op=ExchangeChannelOp; (void) GetNextToken(p,&p,MagickPathExtent,token); } if (*token == '=') { if (channel_op != ExchangeChannelOp) channel_op=AssignChannelOp; (void) GetNextToken(p,&p,MagickPathExtent,token); } if (*token == '>') { if (channel_op != ExchangeChannelOp) channel_op=TransferChannelOp; (void) GetNextToken(p,&p,MagickPathExtent,token); } switch (channel_op) { case AssignChannelOp: case 
ExchangeChannelOp: case TransferChannelOp: { if (channel_op == AssignChannelOp) pixel=StringToDoubleInterval(token,(double) QuantumRange+1.0); else { i=ParsePixelChannelOption(token); if (i < 0) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnrecognizedChannelType","`%s'",token); destination_image=DestroyImageList(destination_image); return(destination_image); } } destination_channel=(PixelChannel) i; if (i >= (ssize_t) GetPixelChannels(destination_image)) (void) SetPixelMetaChannels(destination_image,(size_t) ( destination_channel-GetPixelChannels(destination_image)+1), exception); if (image->colorspace != UndefinedColorspace) switch (destination_channel) { case RedPixelChannel: case GreenPixelChannel: case BluePixelChannel: case BlackPixelChannel: case IndexPixelChannel: break; case AlphaPixelChannel: { destination_image->alpha_trait=BlendPixelTrait; break; } case CompositeMaskPixelChannel: { destination_image->channels=(ChannelType) (destination_image->channels | CompositeMaskChannel); break; } case ReadMaskPixelChannel: { destination_image->channels=(ChannelType) (destination_image->channels | ReadMaskChannel); break; } case WriteMaskPixelChannel: { destination_image->channels=(ChannelType) (destination_image->channels | WriteMaskChannel); break; } case MetaPixelChannel: default: { (void) SetPixelMetaChannels(destination_image,(size_t) ( destination_channel-GetPixelChannels(destination_image)+1), exception); break; } } channel_mask=(ChannelType) (channel_mask | ParseChannelOption(token)); if (((channels >= 1) || (destination_channel >= 1)) && (IsGrayColorspace(destination_image->colorspace) != MagickFalse)) (void) SetImageColorspace(destination_image,sRGBColorspace,exception); (void) GetNextToken(p,&p,MagickPathExtent,token); break; } default: break; } status=ChannelImage(destination_image,destination_channel,channel_op, source_image,source_channel,ClampToQuantum(pixel),exception); if (status == MagickFalse) { 
destination_image=DestroyImageList(destination_image); break; } channels++; if (channel_op == ExchangeChannelOp) { status=ChannelImage(destination_image,source_channel,channel_op, source_image,destination_channel,ClampToQuantum(pixel),exception); if (status == MagickFalse) { destination_image=DestroyImageList(destination_image); break; } channels++; } switch (channel_op) { case ExtractChannelOp: { channel_mask=(ChannelType) (channel_mask | (1UL << destination_channel)); destination_channel=(PixelChannel) (destination_channel+1); break; } default: break; } status=SetImageProgress(source_image,ChannelFxImageTag,p-expression, strlen(expression)); if (status == MagickFalse) break; } (void) SetPixelChannelMask(destination_image,channel_mask); if ((channel_op == ExtractChannelOp) && (channels == 1)) { (void) SetPixelMetaChannels(destination_image,0,exception); (void) SetImageColorspace(destination_image,GRAYColorspace,exception); } return(GetFirstImageInList(destination_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m b i n e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CombineImages() combines one or more images into a single image. The % grayscale value of the pixels of each image in the sequence is assigned in % order to the specified channels of the combined image. The typical % ordering would be image 1 => Red, 2 => Green, 3 => Blue, etc. % % The format of the CombineImages method is: % % Image *CombineImages(const Image *images,const ColorspaceType colorspace, % ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o colorspace: the image colorspace. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *CombineImages(const Image *image, const ColorspaceType colorspace,ExceptionInfo *exception) { #define CombineImageTag "Combine/Image" CacheView *combine_view; Image *combine_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Ensure the image are the same size. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); combine_image=CloneImage(image,0,0,MagickTrue,exception); if (combine_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(combine_image,DirectClass,exception) == MagickFalse) { combine_image=DestroyImage(combine_image); return((Image *) NULL); } if (colorspace != UndefinedColorspace) (void) SetImageColorspace(combine_image,colorspace,exception); else if (fabs(image->gamma-1.0) <= MagickEpsilon) (void) SetImageColorspace(combine_image,RGBColorspace,exception); else (void) SetImageColorspace(combine_image,sRGBColorspace,exception); switch (combine_image->colorspace) { case UndefinedColorspace: case sRGBColorspace: { if (GetImageListLength(image) > 3) combine_image->alpha_trait=BlendPixelTrait; break; } case LinearGRAYColorspace: case GRAYColorspace: { if (GetImageListLength(image) > 1) combine_image->alpha_trait=BlendPixelTrait; break; } case CMYKColorspace: { if (GetImageListLength(image) > 4) combine_image->alpha_trait=BlendPixelTrait; break; } default: break; } /* Combine images. 
*/ status=MagickTrue; progress=0; combine_view=AcquireAuthenticCacheView(combine_image,exception); for (y=0; y < (ssize_t) combine_image->rows; y++) { CacheView *image_view; const Image *next; Quantum *pixels; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t i; if (status == MagickFalse) continue; pixels=GetCacheViewAuthenticPixels(combine_view,0,y,combine_image->columns, 1,exception); if (pixels == (Quantum *) NULL) { status=MagickFalse; continue; } next=image; for (i=0; i < (ssize_t) GetPixelChannels(combine_image); i++) { register ssize_t x; PixelChannel channel = GetPixelChannelChannel(combine_image,i); PixelTrait traits = GetPixelChannelTraits(combine_image,channel); if (traits == UndefinedPixelTrait) continue; if (next == (Image *) NULL) continue; image_view=AcquireVirtualCacheView(next,exception); p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); if (p == (const Quantum *) NULL) continue; q=pixels; for (x=0; x < (ssize_t) combine_image->columns; x++) { if (x < (ssize_t) next->columns) { q[i]=GetPixelGray(next,p); p+=GetPixelChannels(next); } q+=GetPixelChannels(combine_image); } image_view=DestroyCacheView(image_view); next=GetNextImageInList(next); } if (SyncCacheViewAuthenticPixels(combine_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,CombineImageTag,progress, combine_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } combine_view=DestroyCacheView(combine_view); if (status == MagickFalse) combine_image=DestroyImage(combine_image); return(combine_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e A l p h a C h a n n e l % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageAlphaChannel() returns MagickFalse if the image alpha channel is
%  not activated.  That is, the image is RGB rather than RGBA or CMYK rather
%  than CMYKA.
%
%  The format of the GetImageAlphaChannel method is:
%
%      MagickBooleanType GetImageAlphaChannel(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType GetImageAlphaChannel(const Image *image)
{
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* Any trait other than UndefinedPixelTrait counts as "alpha active". */
  return(image->alpha_trait != UndefinedPixelTrait ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S e p a r a t e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SeparateImage() separates a channel from the image and returns it as a
%  grayscale image.
%
%  The format of the SeparateImage method is:
%
%      Image *SeparateImage(const Image *image,const ChannelType channel,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the image channel.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SeparateImage(const Image *image,
  const ChannelType channel_type,ExceptionInfo *exception)
{
/* Test bit 'bit' of the channel mask 'mask'. */
#define GetChannelBit(mask,bit)  (((size_t) (mask) >> (size_t) (bit)) & 0x01)
#define SeparateImageTag  "Separate/Image"

  CacheView
    *image_view,
    *separate_view;

  Image
    *separate_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize separate image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  separate_image=CloneImage(image,0,0,MagickTrue,exception);
  if (separate_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(separate_image,DirectClass,exception) == MagickFalse)
    {
      separate_image=DestroyImage(separate_image);
      return((Image *) NULL);
    }
  /* The result is a plain grayscale image without alpha. */
  separate_image->alpha_trait=UndefinedPixelTrait;
  (void) SetImageColorspace(separate_image,GRAYColorspace,exception);
  separate_image->gamma=image->gamma;
  /*
    Separate image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  separate_view=AcquireAuthenticCacheView(separate_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(separate_view,0,y,separate_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /* Default the gray value to 0, then copy from each selected channel;
         when several channels are selected, the last one in channel order
         wins (each iteration overwrites the gray value). */
      SetPixelChannel(separate_image,GrayPixelChannel,(Quantum) 0,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (GetChannelBit(channel_type,channel) == 0))
          continue;
        SetPixelChannel(separate_image,GrayPixelChannel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(separate_image);
    }
    if (SyncCacheViewAuthenticPixels(separate_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SeparateImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  separate_view=DestroyCacheView(separate_view);
  image_view=DestroyCacheView(image_view);
  (void) SetImageChannelMask(separate_image,DefaultChannels);
  if (status == MagickFalse)
    separate_image=DestroyImage(separate_image);
  return(separate_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S e p a r a t e I m a g e s                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SeparateImages() returns a separate grayscale image for each channel
%  specified.
%
%  The format of the SeparateImages method is:
%
%      Image *SeparateImages(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *SeparateImages(const Image *image,ExceptionInfo *exception) { Image *images, *separate_image; register ssize_t i; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); images=NewImageList(); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0)) continue; separate_image=SeparateImage(image,(ChannelType) (1UL << channel), exception); if (separate_image != (Image *) NULL) AppendImageToList(&images,separate_image); } if (images == (Image *) NULL) images=SeparateImage(image,UndefinedChannel,exception); return(images); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e A l p h a C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageAlphaChannel() activates, deactivates, resets, or sets the alpha % channel. % % The format of the SetImageAlphaChannel method is: % % MagickBooleanType SetImageAlphaChannel(Image *image, % const AlphaChannelOption alpha_type,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o alpha_type: The alpha channel type: ActivateAlphaChannel, % AssociateAlphaChannel, CopyAlphaChannel, DeactivateAlphaChannel, % DisassociateAlphaChannel, ExtractAlphaChannel, OffAlphaChannel, % OnAlphaChannel, OpaqueAlphaChannel, SetAlphaChannel, ShapeAlphaChannel, % and TransparentAlphaChannel. % % o exception: return any errors or warnings in this structure. 
% */

/*
  Compose pixel 'p' (with opacity 'alpha') over pixel 'q' (with opacity
  'beta') and write the blended channels into 'composite'.  Only the color,
  black and alpha channels are blended; other channels are left untouched.
*/
static inline void FlattenPixelInfo(const Image *image,const PixelInfo *p,
  const double alpha,const Quantum *q,const double beta,
  Quantum *composite)
{
  double
    Da,
    gamma,
    Sa;

  register ssize_t
    i;

  /*
    Compose pixel p over pixel q with the given alpha.
  */
  Sa=QuantumScale*alpha;
  Da=QuantumScale*beta,
  /* gamma is the composite alpha (over operator); reciprocal un-multiplies
     the blended color channels. */
  gamma=Sa*(-Da)+Sa+Da;
  gamma=PerceptibleReciprocal(gamma);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if (traits == UndefinedPixelTrait)
      continue;
    switch (channel)
    {
      case RedPixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->red,alpha));
        break;
      }
      case GreenPixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->green,alpha));
        break;
      }
      case BluePixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->blue,alpha));
        break;
      }
      case BlackPixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->black,alpha));
        break;
      }
      case AlphaPixelChannel:
      {
        composite[i]=ClampToQuantum(QuantumRange*(Sa*(-Da)+Sa+Da));
        break;
      }
      default:
        break;
    }
  }
}

MagickExport MagickBooleanType SetImageAlphaChannel(Image *image,
  const AlphaChannelOption alpha_type,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  status=MagickTrue;
  switch (alpha_type)
  {
    case ActivateAlphaChannel:
    {
      /* Just flag alpha as participating in compositing. */
      image->alpha_trait=BlendPixelTrait;
      break;
    }
    case AssociateAlphaChannel:
    {
      /*
        Associate alpha.
      */
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            gamma;

          register ssize_t
            i;

          /* Premultiply each updatable channel by its alpha. */
          gamma=QuantumScale*GetPixelAlpha(image,q);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (channel == AlphaPixelChannel)
              continue;
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=ClampToQuantum(gamma*q[i]);
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->alpha_trait=CopyPixelTrait;
      return(status);
    }
    case BackgroundAlphaChannel:
    {
      /*
        Set transparent pixels to background color.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        break;
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          /* Only fully transparent pixels are rewritten; alpha stays
             transparent. */
          if (GetPixelAlpha(image,q) == TransparentAlpha)
            {
              SetPixelViaPixelInfo(image,&image->background_color,q);
              SetPixelChannel(image,AlphaPixelChannel,TransparentAlpha,q);
            }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      return(status);
    }
    case CopyAlphaChannel:
    {
      /* Derive alpha from pixel intensity via composite. */
      image->alpha_trait=UpdatePixelTrait;
      status=CompositeImage(image,image,IntensityCompositeOp,MagickTrue,0,0,
        exception);
      break;
    }
    case DeactivateAlphaChannel:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      image->alpha_trait=CopyPixelTrait;
      break;
    }
    case DisassociateAlphaChannel:
    {
      /*
        Disassociate alpha.
      */
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image->alpha_trait=BlendPixelTrait;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            gamma,
            Sa;

          register ssize_t
            i;

          /* Un-premultiply: divide each updatable channel by its alpha. */
          Sa=QuantumScale*GetPixelAlpha(image,q);
          gamma=PerceptibleReciprocal(Sa);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (channel == AlphaPixelChannel)
              continue;
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=ClampToQuantum(gamma*q[i]);
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->alpha_trait=UndefinedPixelTrait;
      return(status);
    }
    case DiscreteAlphaChannel:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      image->alpha_trait=UpdatePixelTrait;
      break;
    }
    case ExtractAlphaChannel:
    {
      status=CompositeImage(image,image,AlphaCompositeOp,MagickTrue,0,0,
        exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case OffAlphaChannel:
    {
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case OnAlphaChannel:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      image->alpha_trait=BlendPixelTrait;
      break;
    }
    case OpaqueAlphaChannel:
    {
      status=SetImageAlpha(image,OpaqueAlpha,exception);
      break;
    }
    case RemoveAlphaChannel:
    {
      /*
        Remove transparency.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        break;
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          /* Flatten each pixel against the background color in place. */
          FlattenPixelInfo(image,&image->background_color,
            image->background_color.alpha,q,(double) GetPixelAlpha(image,q),q);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->alpha_trait=image->background_color.alpha_trait;
      break;
    }
    case SetAlphaChannel:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      break;
    }
    case ShapeAlphaChannel:
    {
      /*
        Remove transparency.
      */
      image->alpha_trait=BlendPixelTrait;
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        PixelInfo
          background;

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        /* Paint the background color using the pixel intensity as alpha. */
        ConformPixelInfo(image,&image->background_color,&background,exception);
        background.alpha_trait=BlendPixelTrait;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          background.alpha=GetPixelIntensity(image,q);
          SetPixelViaPixelInfo(image,&background,q);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      break;
    }
    case TransparentAlphaChannel:
    {
      status=SetImageAlpha(image,TransparentAlpha,exception);
      break;
    }
    case UndefinedAlphaChannel:
      break;
  }
  if (status == MagickFalse)
    return(status);
  (void) SetPixelChannelMask(image,image->channel_mask);
  return(SyncImagePixelCache(image,exception));
}
ppc64le-varargs-f128.c
// NOTE(review): clang/LLVM lit CodeGen test.  Every RUN: and CHECK-prefix
// comment line below is a load-bearing FileCheck directive that pins the
// expected IR for va_arg of 128-bit floats under the IEEE and IBM long
// double ABIs; do not edit their text.
// RUN: %clang_cc1 -triple powerpc64le-unknown-linux-gnu -emit-llvm \
// RUN:   -target-cpu pwr9 -target-feature +float128 -mabi=ieeelongdouble \
// RUN:   -o - %s | FileCheck %s -check-prefix=IEEE
// RUN: %clang_cc1 -triple powerpc64le-unknown-linux-gnu -emit-llvm \
// RUN:   -target-cpu pwr9 -target-feature +float128 \
// RUN:   -o - %s | FileCheck %s -check-prefix=IBM
// RUN: %clang_cc1 -triple ppc64le -emit-llvm-bc %s -target-cpu pwr9 \
// RUN:   -target-feature +float128 -mabi=ieeelongdouble -fopenmp \
// RUN:   -fopenmp-targets=ppc64le -o %t-ppc-host.bc
// RUN: %clang_cc1 -triple ppc64le -aux-triple ppc64le %s -target-cpu pwr9 \
// RUN:   -target-feature +float128 -fopenmp -fopenmp-is-device -emit-llvm \
// RUN:   -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s \
// RUN:   -check-prefix=OMP-TARGET
// RUN: %clang_cc1 -triple ppc64le %t-ppc-host.bc -emit-llvm -o - | FileCheck %s \
// RUN:   -check-prefix=OMP-HOST

#include <stdarg.h>

typedef struct { long double x; } ldbl128_s;

void foo_ld(long double);
void foo_fq(__float128);
void foo_ls(ldbl128_s);

// Verify cases when OpenMP target's and host's long-double semantics differ.

// OMP-TARGET-LABEL: define internal void @.omp_outlined.(
// OMP-TARGET: %[[CUR:[0-9a-zA-Z_.]+]] = load i8*, i8**
// OMP-TARGET: %[[V2:[0-9a-zA-Z_.]+]] = bitcast i8* %[[CUR]] to ppc_fp128*
// OMP-TARGET: %[[V3:[0-9a-zA-Z_.]+]] = load ppc_fp128, ppc_fp128* %[[V2]], align 8
// OMP-TARGET: call void @foo_ld(ppc_fp128 %[[V3]])

// OMP-HOST-LABEL: define{{.*}} void @omp(
// OMP-HOST: %[[AP1:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP:[0-9a-zA-Z_.]+]] to i8*
// OMP-HOST: call void @llvm.va_start(i8* %[[AP1]])
// OMP-HOST: %[[CUR:[0-9a-zA-Z_.]+]] = load i8*, i8** %[[AP]], align 8
// OMP-HOST: %[[V0:[0-9a-zA-Z_.]+]] = ptrtoint i8* %[[CUR]] to i64
// OMP-HOST: %[[V1:[0-9a-zA-Z_.]+]] = add i64 %[[V0]], 15
// OMP-HOST: %[[V2:[0-9a-zA-Z_.]+]] = and i64 %[[V1]], -16
// OMP-HOST: %[[ALIGN:[0-9a-zA-Z_.]+]] = inttoptr i64 %[[V2]] to i8*
// OMP-HOST: %[[V3:[0-9a-zA-Z_.]+]] = bitcast i8* %[[ALIGN]] to fp128*
// OMP-HOST: %[[V4:[0-9a-zA-Z_.]+]] = load fp128, fp128* %[[V3]], align 16
// OMP-HOST: call void @foo_ld(fp128 %[[V4]])
void omp(int n, ...) {
  va_list ap;
  va_start(ap, n);
  foo_ld(va_arg(ap, long double));
  #pragma omp target parallel
  for (int i = 1; i < n; ++i) {
    foo_ld(va_arg(ap, long double));
  }
  va_end(ap);
}

// IEEE-LABEL: define{{.*}} void @f128
// IEEE: %[[AP1:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP:[0-9a-zA-Z_.]+]] to i8*
// IEEE: call void @llvm.va_start(i8* %[[AP1]])
// IEEE: %[[CUR:[0-9a-zA-Z_.]+]] = load i8*, i8** %[[AP]]
// IEEE: %[[V0:[0-9a-zA-Z_.]+]] = ptrtoint i8* %[[CUR]] to i64
// IEEE: %[[V1:[0-9a-zA-Z_.]+]] = add i64 %[[V0]], 15
// IEEE: %[[V2:[0-9a-zA-Z_.]+]] = and i64 %[[V1]], -16
// IEEE: %[[ALIGN:[0-9a-zA-Z_.]+]] = inttoptr i64 %[[V2]] to i8*
// IEEE: %[[V3:[0-9a-zA-Z_.]+]] = bitcast i8* %[[ALIGN]] to fp128*
// IEEE: %[[V4:[0-9a-zA-Z_.]+]] = load fp128, fp128* %[[V3]], align 16
// IEEE: call void @foo_fq(fp128 %[[V4]])
// IEEE: %[[AP2:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP]] to i8*
// IEEE: call void @llvm.va_end(i8* %[[AP2]])
void f128(int n, ...) {
  va_list ap;
  va_start(ap, n);
  foo_fq(va_arg(ap, __float128));
  va_end(ap);
}

// IEEE-LABEL: define{{.*}} void @long_double
// IEEE: %[[AP1:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP:[0-9a-zA-Z_.]+]] to i8*
// IEEE: call void @llvm.va_start(i8* %[[AP1]])
// IEEE: %[[CUR:[0-9a-zA-Z_.]+]] = load i8*, i8** %[[AP]]
// IEEE: %[[V0:[0-9a-zA-Z_.]+]] = ptrtoint i8* %[[CUR]] to i64
// IEEE: %[[V1:[0-9a-zA-Z_.]+]] = add i64 %[[V0]], 15
// IEEE: %[[V2:[0-9a-zA-Z_.]+]] = and i64 %[[V1]], -16
// IEEE: %[[ALIGN:[0-9a-zA-Z_.]+]] = inttoptr i64 %[[V2]] to i8*
// IEEE: %[[V3:[0-9a-zA-Z_.]+]] = bitcast i8* %[[ALIGN]] to fp128*
// IEEE: %[[V4:[0-9a-zA-Z_.]+]] = load fp128, fp128* %[[V3]], align 16
// IEEE: call void @foo_ld(fp128 %[[V4]])
// IEEE: %[[AP2:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP]] to i8*
// IEEE: call void @llvm.va_end(i8* %[[AP2]])

// IBM-LABEL: define{{.*}} void @long_double
// IBM: %[[AP1:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP:[0-9a-zA-Z_.]+]] to i8*
// IBM: call void @llvm.va_start(i8* %[[AP1]])
// IBM: %[[CUR:[0-9a-zA-Z_.]+]] = load i8*, i8** %[[AP]]
// IBM: %[[V3:[0-9a-zA-Z_.]+]] = bitcast i8* %[[CUR]] to ppc_fp128*
// IBM: %[[V4:[0-9a-zA-Z_.]+]] = load ppc_fp128, ppc_fp128* %[[V3]], align 8
// IBM: call void @foo_ld(ppc_fp128 %[[V4]])
// IBM: %[[AP2:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP]] to i8*
// IBM: call void @llvm.va_end(i8* %[[AP2]])
void long_double(int n, ...) {
  va_list ap;
  va_start(ap, n);
  foo_ld(va_arg(ap, long double));
  va_end(ap);
}

// IEEE-LABEL: define{{.*}} void @long_double_struct
// IEEE: %[[AP1:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP:[0-9a-zA-Z_.]+]] to i8*
// IEEE: call void @llvm.va_start(i8* %[[AP1]])
// IEEE: %[[CUR:[0-9a-zA-Z_.]+]] = load i8*, i8** %[[AP]]
// IEEE: %[[P0:[0-9a-zA-Z_.]+]] = ptrtoint i8* %[[CUR]] to i64
// IEEE: %[[P1:[0-9a-zA-Z_.]+]] = add i64 %[[P0]], 15
// IEEE: %[[P2:[0-9a-zA-Z_.]+]] = and i64 %[[P1]], -16
// IEEE: %[[ALIGN:[0-9a-zA-Z_.]+]] = inttoptr i64 %[[P2]] to i8*
// IEEE: %[[V0:[0-9a-zA-Z_.]+]] = getelementptr inbounds i8, i8* %[[ALIGN]], i64 16
// IEEE: store i8* %[[V0]], i8** %[[AP]], align 8
// IEEE: %[[V1:[0-9a-zA-Z_.]+]] = bitcast i8* %[[ALIGN]] to %struct.ldbl128_s*
// IEEE: %[[V2:[0-9a-zA-Z_.]+]] = bitcast %struct.ldbl128_s* %[[TMP:[0-9a-zA-Z_.]+]] to i8*
// IEEE: %[[V3:[0-9a-zA-Z_.]+]] = bitcast %struct.ldbl128_s* %[[V1]] to i8*
// IEEE: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %[[V2]], i8* align 16 %[[V3]], i64 16, i1 false)
// IEEE: %[[COERCE:[0-9a-zA-Z_.]+]] = getelementptr inbounds %struct.ldbl128_s, %struct.ldbl128_s* %[[TMP]], i32 0, i32 0
// IEEE: %[[V4:[0-9a-zA-Z_.]+]] = load fp128, fp128* %[[COERCE]], align 16
// IEEE: call void @foo_ls(fp128 inreg %[[V4]])
// IEEE: %[[AP2:[0-9a-zA-Z_.]+]] = bitcast i8** %[[AP]] to i8*
// IEEE: call void @llvm.va_end(i8* %[[AP2]])
void long_double_struct(int n, ...) {
  va_list ap;
  va_start(ap, n);
  foo_ls(va_arg(ap, ldbl128_s));
  va_end(ap);
}
fill_nr_3c.c
/*
 * Author: Qiming Sun <osirpt.sun@gmail.com>
 */

#include <stdlib.h>
#include <stdio.h>
#include "config.h"
#include "cint.h"

/* Largest shell dimension in the slice (declared in the GTO helpers). */
int GTOmax_shell_dim(int *ao_loc, int *shls_slice, int ncenter);
/* Scratch-cache size required by 'intor' over the slice. */
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
                      int *atm, int natm, int *bas, int nbas, double *env);

/*
 * out[naoi,naoj,naok,comp] in F-order
 *
 * Fill the (ish,jsh) block of the 3-center integral tensor with no
 * permutation symmetry: for each ksh the integral routine writes directly
 * into 'out' at the block offset computed from ao_loc.
 */
void GTOnr3c_fill_s1(int (*intor)(), double *out, double *buf, int comp,
                     int ish, int jsh, int *shls_slice, int *ao_loc,
                     CINTOpt *cintopt, int *atm, int natm, int *bas, int nbas,
                     double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int ksh0 = shls_slice[4];
        const int ksh1 = shls_slice[5];
        const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
        const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
        const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
        const size_t nij = naoi * naoj;
        /* NOTE(review): size_t -> int narrowing here; assumes AO counts fit
         * in int -- confirm for very large slices. */
        const int dims[] = {naoi, naoj, naok};

        ish += ish0;
        jsh += jsh0;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int ip = ao_loc[ish] - ao_loc[ish0];
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        /* Advance to the (ip,jp) corner of this shell-pair block. */
        out += jp * naoi + ip;

        int ksh, dk, k0;
        int shls[3];
        shls[0] = ish;
        shls[1] = jsh;
        for (ksh = ksh0; ksh < ksh1; ksh++) {
                shls[2] = ksh;
                k0 = ao_loc[ksh ] - ao_loc[ksh0];
                dk = ao_loc[ksh+1] - ao_loc[ksh];
                (*intor)(out+k0*nij, dims, shls, atm, natm, bas, nbas, env,
                         cintopt, buf);
        }
}

/*
 * Scatter one integral block (i > j case) from the contiguous buffer 'in'
 * into the i>=j triangular-packed output, transposing the (i,j) indices.
 */
static void dcopy_s2_igtj(double *out, double *in, int comp,
                          int ip, int nij, int nijk, int di, int dj, int dk)
{
        const size_t dij = di * dj;
        const size_t ip1 = ip + 1;
        int i, j, k, ic;
        double *pout, *pin;
        for (ic = 0; ic < comp; ic++) {
                for (k = 0; k < dk; k++) {
                        pout = out + k * nij;
                        pin = in + k * dij;
                        for (i = 0; i < di; i++) {
                                /* Full row: every j column is valid. */
                                for (j = 0; j < dj; j++) {
                                        pout[j] = pin[j*di+i];
                                }
                                /* Triangular row stride: row (ip+i) has
                                 * ip+1+i leading elements. */
                                pout += ip1 + i;
                        }
                }
                out += nijk;
                in += dij * dk;
        }
}

/*
 * Same as dcopy_s2_igtj but for the diagonal shell pair (i == j): only the
 * lower triangle (j <= i) of each block is written.
 */
static void dcopy_s2_ieqj(double *out, double *in, int comp,
                          int ip, int nij, int nijk, int di, int dj, int dk)
{
        const size_t dij = di * dj;
        const size_t ip1 = ip + 1;
        int i, j, k, ic;
        double *pout, *pin;
        for (ic = 0; ic < comp; ic++) {
                for (k = 0; k < dk; k++) {
                        pout = out + k * nij;
                        pin = in + k * dij;
                        for (i = 0; i < di; i++) {
                                for (j = 0; j <= i; j++) {
                                        pout[j] = pin[j*di+i];
                                }
                                pout += ip1 + i;
                        }
                }
                out += nijk;
                in += dij * dk;
        }
}

/*
 * out[comp,naok,nij] in C-order
 * nij = i1*(i1+1)/2 - i0*(i0+1)/2
 * [  \    ]
 * [****   ]
 * [*****  ]
 * [*****. ]  <= . may not be filled, if jsh-upper-bound < ish-upper-bound
 * [      \]
 */
void GTOnr3c_fill_s2ij(int (*intor)(), double *out, double *buf, int comp,
                       int ish, int jsh, int *shls_slice, int *ao_loc,
                       CINTOpt *cintopt, int *atm, int natm, int *bas, int nbas,
                       double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        ish += ish0;
        jsh += jsh0;
        const int ip = ao_loc[ish];
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        /* Only the i >= j triangle is stored; skip the upper triangle. */
        if (ip < jp) {
                return;
        }

        const int ksh0 = shls_slice[4];
        const int ksh1 = shls_slice[5];
        const int i0 = ao_loc[ish0];
        const int i1 = ao_loc[ish1];
        const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
        const size_t off = i0 * (i0 + 1) / 2;
        const size_t nij = i1 * (i1 + 1) / 2 - off;
        const size_t nijk = nij * naok;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        /* Triangular-packed offset of row ip, column jp. */
        out += ip * (ip + 1) / 2 - off + jp;

        int ksh, dk, k0;
        int shls[3];
        /* Size the integral buffer with the worst-case dk; 'cache' is the
         * scratch area behind it. */
        dk = GTOmax_shell_dim(ao_loc, shls_slice, 3);
        double *cache = buf + di * dj * dk * comp;
        shls[0] = ish;
        shls[1] = jsh;
        for (ksh = ksh0; ksh < ksh1; ksh++) {
                shls[2] = ksh;
                dk = ao_loc[ksh+1] - ao_loc[ksh];
                k0 = ao_loc[ksh ] - ao_loc[ksh0];
                /* Integrate into 'buf', then repack into the triangle. */
                (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
                         cintopt, cache);
                if (ip != jp) {
                        dcopy_s2_igtj(out+k0*nij, buf, comp, ip, nij, nijk,
                                      di, dj, dk);
                } else {
                        dcopy_s2_ieqj(out+k0*nij, buf, comp, ip, nij, nijk,
                                      di, dj, dk);
                }
        }
}

/* Placeholder: jk-symmetric fill is not implemented; aborts the process. */
void GTOnr3c_fill_s2jk(int (*intor)(), double *out, double *buf, int comp,
                       int ish, int jsh, int *shls_slice, int *ao_loc,
                       CINTOpt *cintopt, int *atm, int natm, int *bas,
                       int nbas, double *env)
{
        fprintf(stderr, "GTOnr3c_fill_s2jk not implemented\n");
        exit(1);
}

/*
 * Driver: parallelize the 3-center fill over all (ish,jsh) shell pairs.
 * Each thread allocates its own integral buffer (di^3*comp doubles plus the
 * intor scratch cache) and invokes the supplied 'fill' kernel.
 */
void GTOnr3c_drv(int (*intor)(), void (*fill)(), double *eri, int comp,
                 int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                 int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int nish = ish1 - ish0;
        const int njsh = jsh1 - jsh0;
        const int di = GTOmax_shell_dim(ao_loc, shls_slice, 3);
        const int cache_size = GTOmax_cache_size(intor, shls_slice, 3,
                                                 atm, natm, bas, nbas, env);
/* NOTE(review): with default(none), the const variables nish/njsh/di/
 * cache_size referenced below rely on the predetermined-shared rule for
 * const-qualified objects; some compilers reject this -- verify against the
 * supported OpenMP toolchains. */
#pragma omp parallel default(none) \
        shared(intor, fill, eri, comp, shls_slice, ao_loc, cintopt, \
               atm, natm, bas, nbas, env)
{
        int ish, jsh, ij;
        /* NOTE(review): malloc return value is not checked; a failed
         * allocation would crash inside 'fill'. */
        double *buf = malloc(sizeof(double) * (di*di*di*comp + cache_size));
#pragma omp for schedule(dynamic)
        for (ij = 0; ij < nish*njsh; ij++) {
                /* Decode the flattened pair index into (ish, jsh). */
                ish = ij / njsh;
                jsh = ij % njsh;
                (*fill)(intor, eri, buf, comp, ish, jsh,
                        shls_slice, ao_loc, cintopt,
                        atm, natm, bas, nbas, env);
        }
        free(buf);
}
}
PhysicalSystemRigidBody.h
// // PhysicalSystemRigidBody.h // Gauss // // Created by David Levin on 5/8/18. // #ifndef PhysicalSystemRigidBody_h #define PhysicalSystemRigidBody_h //A dynamic rigid body #include <vector> #include <DOFParticle.h> #include <DOFRotation.h> #include <DOFPair.h> #include <UtilitiesEigen.h> #include <UtilitiesRigidBodies.h> namespace Gauss { namespace RigidBodies { template<typename DataType> class PhysicalSystemRigidBodyImpl { public: using GammaMatrix = Eigen::Matrix<DataType, 3,6>; //temporary global indices until I update the state to give these to me //automatically PhysicalSystemRigidBodyImpl(const Eigen::Ref<Eigen::MatrixXd> &V, const Eigen::Ref<Eigen::MatrixXi> &F, DataType density=1.0) { m_V = V; m_F = F; m_numVerts = m_V.rows(); m_numFaces = m_F.rows(); assert(m_V.cols() == 3); //3D only for now Eigen::Vector3x<DataType> inertia; m_mass = computeMoments(m_com, inertia, m_R0, m_V, m_F); m_mass *= density; inertia *= density; m_massMatrix.setConstant(m_mass); m_massMatrix.segment(0,3) = inertia; //subtract center of mass off of vertex positions #pragma omp parallel for for(unsigned int ii = 0; ii < V.rows(); ++ii) { m_V.row(ii) -= m_com.transpose(); } } ~PhysicalSystemRigidBodyImpl() { } DataType getEnergy(const State<DataType> &state) const { } DataType getStrainEnergy(const State<DataType> &state) const { return 0.0; } template<typename Assembler> inline void getMassMatrix(Assembler &assembler, const State<DataType> &state) const { //Rigid body mass matrix assign(assembler, m_massMatrix.asDiagonal().toDenseMatrix().eval(), getQDot(0), getQDot(0)); } //the rigid body jacobian template<typename Assembler> inline void getStiffnessMatrix(Assembler &assembler, const State<DataType> &state) const { //do nothing, rigid body has no constitutive model } template<typename Assembler> inline void getForce(Assembler &assembler, const State<DataType> &state) const { getBodyForce(assembler, state); } template<typename Assembler> inline void getInternalForce(Assembler 
&assembler, const State<DataType> &state) const { //centripedal force and coriolis force } template<typename Assembler> inline void getBodyForce(Assembler &assembler, const State<DataType> &state) const { //gravity goes here Eigen::Matrix<DataType, 6,1> g; g << 0,0,0, 0, m_mass*(-9.8), 0.0; g.segment(3,3) = mapDOFEigenQuat(m_q.first(), state).toRotationMatrix().transpose()*g.segment(3,3); //std::cout<<"W: "<<mapDOFEigenQuat(m_q.first(), state).x()<<" "<<mapDOFEigenQuat(m_q.first(), state).y()<<" "<<mapDOFEigenQuat(m_q.first(), state).z()<<" "<<mapDOFEigenQuat(m_q.first(), state).w()<<"\n"; //std::cout<<"ROTATION MATRIX: \n"<<mapDOFEigenQuat(m_q.first(), state).toRotationMatrix()<<"\n"; assign(assembler, g, getQDot(0)); } //Degree-of-freedom access inline auto & getQ() { return m_q; } inline const auto & getQ() const { return m_q; } inline auto & getQDot() { return m_qDot; } inline const auto & getQDot() const { return m_qDot; } //get function supporting a vertex (these return arrays in order to slot directly into assemblers) inline decltype(auto) getQ(unsigned int vertexId) const { std::array<const DOFBase<DataType,0> *,1> toReturn = {{&m_q}}; return toReturn; } inline decltype(auto) getQDot(unsigned int vertexId) const { std::array<const DOFBase<DataType,1> *,1> toReturn = {{&m_qDot}}; return toReturn; } template<typename Vector> inline decltype(auto) getQ(Vector &x, unsigned int elementId) const { std::cout<<"Error not implemented \n"; exit(0); std::array<const DOFBase<DataType,0> *, 1> toReturn = {{&m_q[elementId]}}; return toReturn; } template<typename Vector> inline decltype(auto) getQDot(Vector &x, unsigned int elementId) const { std::cout<<"Error not implemented \n"; exit(0); std::array<const DOFBase<DataType,1> *,1> toReturn = {{&m_qDot[elementId]}}; return toReturn; } //Geometry inline const auto getPosition(const State<DataType> &state, unsigned int vertexId) const { return (mapDOFEigenQuat(m_q.first(), 
state).toRotationMatrix()*(m_V.row(vertexId).transpose()) + m_com + mapDOFEigen(m_q.second(), state)).eval(); } inline const auto getVelocity(const State<DataType> &state, unsigned int vertexId) const { return getDPDQ(state,vertexId)*mapDOFEigen(m_qDot, state); } inline const auto getDPDQ(const State<DataType> &state, unsigned int vertexId) const { GammaMatrix gamma; gamma.setZero(); gamma.block(0,0,3,3) << 0, m_V(vertexId,2), -m_V(vertexId,1), -m_V(vertexId,2), 0, m_V(vertexId,0), m_V(vertexId,1), -m_V(vertexId,0), 0; gamma.block(0,3,3,3).setIdentity(); return mapDOFEigen(m_q.first(), state).toRotationMatrix()*gamma; } //Rigid body Jacobian goes here inline const auto getDPDQ(const State<DataType> &state, unsigned int elementId, const Eigen::Vector3x<DataType> &pos) const { std::cout<<"position-based DPDQ not implemented in rigid body system \n"; exit(0); } inline auto getGeometry() { return std::make_pair(std::ref(m_V), std::ref(m_F)); } protected: //Mesh unsigned int m_numVerts, m_numFaces; Eigen::MatrixXd m_V; Eigen::MatrixXi m_F; DataType m_mass; Eigen::Matrix33x<DataType> m_R0; //initial rotation from inertia frame to initial state Eigen::Vector3x<DataType> m_com; //center of mass in body frame Eigen::Vector6x<DataType> m_massMatrix; //mass matrix is diagonal for rigid bodies (yay!) //positions are a rotation and a particle (for the translation) -- stored in body frame DOFPair<DataType, DOFRotation, DOFParticle, 0> m_q; //velocities are an angular velocity (particle) and a linear velocity (particle) -- stored in body frame DOFPair<DataType, DOFParticle, DOFParticle, 1> m_qDot; private: }; template<typename DataType> using PhysicalSystemRigidBody = PhysicalSystem<DataType, PhysicalSystemRigidBodyImpl<DataType> >; } } #endif /* PhysicalSystemRigidBody_h */
fma_omp.c
#include <stdlib.h> #include <stdio.h> #if defined(_OPENMP) #include <omp.h> #endif #include "fma_omp.h" #include "fma_host.h" int fma_host_initialize (int N, int T, float **a) { if (!N) { *a = NULL; } else { *a = (float *) malloc (N * sizeof (float)); if (!*a) { printf ("Failed to allocate a\n"); return 1; } #pragma omp parallel for schedule(static) for (int i = 0; i < N; i++) { (*a)[i] = (float) i; } } return 0; } int fma_host_free (int N, int T, float **a) { free (*a); *a = NULL; return 0; } int fma_host_start (int N, int T, float *a, float b, float c) { #pragma omp parallel { int num_threads, my_thread; int my_start, my_end; int my_N; #if defined(_OPENMP) my_thread = omp_get_thread_num(); num_threads = omp_get_num_threads(); #else my_thread = 0; num_threads = 1; #endif /* get thread intervals */ my_start = ((size_t) my_thread * (size_t) N) / (size_t) num_threads; my_end = ((size_t) (my_thread + 1) * (size_t) N) / (size_t) num_threads; my_N = my_end - my_start; #if 0 printf ("[%d/%d]: [%d, %d)\n", my_thread, num_threads, my_start, my_end); #endif /* execute the loop */ fma_loop_host (my_N, T, &a[my_start], b, c); } return 0; } int fma_host_end (int N, int T, float *a, float b, float c) { return 0; }
blaze3.h
#include <iostream>
#include <algorithm>
#include <cstring>
#include <vector>
#include <future>
#include <mutex>

using namespace std;

#if defined(_OPENMP)
// 4 lane (i.e. kit-kat) style speed up only when OMP used
// for no reason other than removing all parallelism from serial version
#include "_avx2_choco.h"
#endif

#ifndef _AVX2_CHOCO_H
// these types are already defined there
using u32 = uint32_t;
using u64 = uint64_t;
using u8 = uint8_t;
#endif

// BLAKE3-style parameters: 32-byte digest/key, 64-byte message blocks,
// 1024-byte chunks.
const u32 OUT_LEN = 32;
const u32 KEY_LEN = 32;
const u32 BLOCK_LEN = 64;
const u32 CHUNK_LEN = 1024;

// Multiple chunks make a snicker bar :)
// SNICKER = number of chunks batched per subtree (must be a power of 2).
const u32 SNICKER = 1U << 9;

// Factory height and snicker size have an inversly propotional relationship
// FACTORY_HT * (log2 SNICKER) + 10 >= 64
const u32 FACTORY_HT = 6;

// Domain-separation flags mixed into the compression function.
const u32 CHUNK_START = 1 << 0;
const u32 CHUNK_END = 1 << 1;
const u32 PARENT = 1 << 2;
const u32 ROOT = 1 << 3;
const u32 KEYED_HASH = 1 << 4;

// Bit width of a word; used by rotr.
const int usize = sizeof(u32) * 8;

mutex factory_lock;
// NOTE(review): IS_ASYNC is a C++ const, but it is tested below with
// `#if IS_ASYNC`; the preprocessor cannot see C++ consts and treats the
// undefined identifier as 0, so the async paths are always compiled out
// regardless of this value.  A #define would be needed to enable them.
const int IS_ASYNC = 0;

// BLAKE3 initialization vector (same as SHA-256's IV).
const u32 IV[8] = {
    0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
    0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
};

// Message-word schedule applied between rounds.
const int MSG_PERMUTATION[] = { 2, 6, 3, 10, 7, 0, 4, 13,
                                1, 11, 12, 5, 9, 14, 15, 8 };

// Rotate right; only called with shift in {7, 8, 12, 16}.
u32 rotr(u32 value, int shift) {
    return (value >> shift)|(value << (usize - shift));
}

// The quarter-round mixing function g on state words a, b, c, d with
// message words mx, my.
void g(u32 state[16], u32 a, u32 b, u32 c, u32 d, u32 mx, u32 my) {
    state[a] = state[a] + state[b] + mx;
    state[d] = rotr((state[d] ^ state[a]), 16);
    state[c] = state[c] + state[d];
    state[b] = rotr((state[b] ^ state[c]), 12);
    state[a] = state[a] + state[b] + my;
    state[d] = rotr((state[d] ^ state[a]), 8);
    state[c] = state[c] + state[d];
    state[b] = rotr((state[b] ^ state[c]), 7);
}

// One full round: mix the four columns, then the four diagonals.
void round(u32 state[16], u32 m[16]) {
    // Mix the columns.
    g(state, 0, 4, 8, 12, m[0], m[1]);
    g(state, 1, 5, 9, 13, m[2], m[3]);
    g(state, 2, 6, 10, 14, m[4], m[5]);
    g(state, 3, 7, 11, 15, m[6], m[7]);
    // Mix the diagonals.
    g(state, 0, 5, 10, 15, m[8], m[9]);
    g(state, 1, 6, 11, 12, m[10], m[11]);
    g(state, 2, 7, 8, 13, m[12], m[13]);
    g(state, 3, 4, 9, 14, m[14], m[15]);
}

// Permute the 16 message words in place per MSG_PERMUTATION.
void permute(u32 m[16]) {
    u32 permuted[16];
    for(int i=0; i<16; i++) permuted[i] = m[MSG_PERMUTATION[i]];
    for(int i=0; i<16; i++) m[i] = permuted[i];
}

#ifndef _AVX2_CHOCO_H
// AVX2 has a specialized version for this
// The 7-round compression function.  Writes the full 16-word output
// state into `state`; the first 8 words are the new chaining value.
void compress(
    u32 chaining_value[8],
    u32 block_words[16],
    u64 counter,
    u32 block_len,
    u32 flags,
    u32 state[16]
) {
    memcpy(state, chaining_value, 8*sizeof(*state));
    memcpy(state+8, IV, 4*sizeof(*state));
    state[12] = (u32)counter;
    state[13] = (u32)(counter >> 32);
    state[14] = block_len;
    state[15] = flags;

    // Work on a copy so the caller's block_words stay intact.
    u32 block[16];
    memcpy(block, block_words, 16*sizeof(*block));

    round(state, block); // round 1
    permute(block);
    round(state, block); // round 2
    permute(block);
    round(state, block); // round 3
    permute(block);
    round(state, block); // round 4
    permute(block);
    round(state, block); // round 5
    permute(block);
    round(state, block); // round 6
    permute(block);
    round(state, block); // round 7

    // Feed-forward: fold the two state halves together.
    for(int i=0; i<8; i++){
        state[i] ^= state[i + 8];
        state[i + 8] ^= chaining_value[i];
    }
}
#endif

// Decode little-endian bytes into 32-bit words.  bytes_len must be a
// multiple of 4 (callers pad to a 4-byte boundary first).
void words_from_little_endian_bytes(u8 *bytes, u32 *words, u32 bytes_len) {
    u32 tmp;
    for(u32 i=0; i<bytes_len; i+=4) {
        tmp = (bytes[i+3]<<24) | (bytes[i+2]<<16) | (bytes[i+1]<<8) | bytes[i];
        words[i/4] = tmp;
    }
}

// A node of the hash tree: either a leaf chunk (raw input bytes) or a
// parent (two child chaining values in `data`).
struct Chunk {
    // use only when it is a leaf node
    // leaf data may have less than 1024 bytes
    u8 leaf_data[1024];
    u32 leaf_len;

    // use in all other cases
    // data will always have 64 bytes
    u32 data[16];

    u32 flags;
    u32 raw_hash[16];
    u32 key[8];

    // only useful for leaf nodes
    u64 counter;

    // Constructor for leaf nodes
    Chunk(char *input, int size, u32 _flags, u32 *_key, u64 ctr){
        counter = ctr;
        flags = _flags;
        memcpy(key, _key, 8*sizeof(*key));
        memset(leaf_data, 0, 1024);
        memcpy(leaf_data, input, size);
        leaf_len = size;
    }

    // Constructor for parent / root placeholder nodes.
    Chunk(u32 _flags, u32 *_key) {
        counter = 0;
        flags = _flags;
        memcpy(key, _key, 8*sizeof(*key));
        leaf_len = 0;
    }

    Chunk() : leaf_len(0) {}

    // process data in sizes of message blocks and store cv in hash
    void compress_chunk(u32=0);
};

// Compress this node.  Parents compress their single 64-byte block;
// leaves walk their data block by block, chaining the output forward.
// `out_flags` is OR-ed into the final block's flags (used for ROOT).
void Chunk::compress_chunk(u32 out_flags) {
    if(flags&PARENT) {
        compress(
            key,
            data,
            0,          // counter is always zero for parent nodes
            BLOCK_LEN,
            flags | out_flags,
            raw_hash
        );
        return;
    }

    u32 chaining_value[8], block_len = BLOCK_LEN, flagger;
    memcpy(chaining_value, key, 8*sizeof(*chaining_value));

    // An empty input is hashed as a single zero-filled block of length 0.
    bool empty_input = (leaf_len==0);
    if(empty_input) {
        for(u32 i=0; i<BLOCK_LEN; i++) leaf_data[i] = 0U;
        leaf_len = BLOCK_LEN;
    }

    for(u32 i=0; i<leaf_len; i+=BLOCK_LEN) {
        flagger = flags;
        // for the last message block
        if(i+BLOCK_LEN > leaf_len) block_len = leaf_len%BLOCK_LEN;
        else block_len = BLOCK_LEN;
        // special case
        if(empty_input) block_len = 0;

        u32 block_words[16];
        memset(block_words, 0, 16*sizeof(*block_words));
        // Round the byte count up to a 4-byte boundary for word decoding;
        // the real block_len is still what gets compressed.
        u32 new_block_len(block_len);
        if(block_len%4) new_block_len += 4 - (block_len%4);
        words_from_little_endian_bytes(leaf_data+i, block_words, new_block_len);

        if(i==0) flagger |= CHUNK_START;
        if(i+BLOCK_LEN >= leaf_len) flagger |= CHUNK_END | out_flags;

        // raw hash for root node
        compress(
            chaining_value,
            block_words,
            counter,
            block_len,
            flagger,
            raw_hash
        );
        memcpy(chaining_value, raw_hash, 8*sizeof(*chaining_value));
    }
}

Chunk hash_many(vector<Chunk> &data, int first, int last);
Chunk merge(Chunk &left, Chunk &right);
void hash_root(Chunk &node, vector<u8> &out_slice);

// Incremental hasher.  Chunks accumulate in factory[0]; whenever a level
// fills to SNICKER nodes it is reduced into one subtree node one level up.
struct Hasher {
    u32 key[8];
    u32 flags;
    u64 ctr;                 // chunk counter for the next leaf

    // Factory of FACTORY_HT possible SNICKER bars
    vector<Chunk> factory[FACTORY_HT];

    // methods
    static Hasher new_internal(const u32 key[8], u32 flags);
    static Hasher _new();
    void update(char *input, int size);
    void finalize(vector<u8> &out_slice);
};

Hasher Hasher::new_internal(const u32 key[8], u32 flags) {
    return Hasher{
        {
            key[0], key[1], key[2], key[3],
            key[4], key[5], key[6], key[7]
        },
        flags,
        0 // counter
    };
}

// Default (unkeyed) hasher using the standard IV.
Hasher Hasher::_new() { return new_internal(IV, 0); }

// Reduce full levels of the factory bottom-up: a full level collapses
// into a single subtree node pushed to the level above.
void propagate(Hasher *h) {
    int level=0;
    while(h->factory[level].size() == SNICKER) {
        // nodes move to upper levels if lower one is one SNICKER long
        Chunk subtree = hash_many(
            h->factory[level], 0, h->factory[level].size()
        );
        h->factory[level].clear();
        ++level;
        h->factory[level].emplace_back(subtree);
    }
#if IS_ASYNC
    factory_lock.unlock();
#endif
}

// Append one chunk (up to CHUNK_LEN bytes) of input.
void Hasher::update(char *input, int size) {
    factory[0].emplace_back(input, size, flags, key, ctr);
    ++ctr;
    if(factory[0].size() == SNICKER) {
        // Let this run in the background
        // Async version slows down execution by 2x
#if IS_ASYNC
        factory_lock.lock();
        static_cast<void>(async(propagate, this));
#else
        propagate(this);
#endif
    }
}

// Collapse all remaining factory levels into a single root node, then
// expand the root into out_slice (extendable output).
void Hasher::finalize(vector<u8> &out_slice) {
    // cout << "Finalizing\n";
    // New style
    // At every level, compress biggest to smallest, then merge them all in the reverse order
    // Pass on the new node to the upper level
    // Continue till topmost level reached. Guaranteed only one node there
    // Root hash the final node
#if IS_ASYNC
    factory_lock.lock();
#endif
    vector<Chunk> subtrees;
    Chunk root(flags, key);
    for(u32 i=0; i<FACTORY_HT; i++) {
        u32 n = factory[i].size(), divider=SNICKER;
        if(!n) continue;
        // Split the level into power-of-two runs (binary decomposition
        // of n) so hash_many always gets a power-of-two span.
        int start = 0;
        while(divider) {
            if(n&divider) {
                subtrees.emplace_back(
                    hash_many(factory[i], start, start+divider)
                );
                start += divider;
            }
            divider >>= 1;
        }
        while(subtrees.size()>1) {
            Chunk tmp1 = subtrees.back();
            subtrees.pop_back();
            Chunk tmp2 = subtrees.back();
            subtrees.pop_back();
            // tmp2 is the left child
            // tmp1 is the right child
            // that's the order they appear within the array
            Chunk tmp = merge(tmp2, tmp1);
            subtrees.push_back(tmp);
        }
        if(i<FACTORY_HT-1) factory[i+1].push_back(subtrees[0]);
        else root = subtrees[0];
        subtrees.clear();
    }
    hash_root(root, out_slice);
}

// A divide and conquer approach
// Reduce data[first, last) (a power-of-two span) to one parent node.
Chunk hash_many(vector<Chunk> &data, int first, int last) {
    // n will always be a power of 2
    int n = last-first;
    if(n == 1) {
        data[first].compress_chunk();
        return data[first];
    }
    // cout << "Called hash many for size: " << n << endl;
    Chunk left, right;
    // parallelism here
    // left and right computation can be done simultaneously
#pragma omp parallel
    {
#pragma omp single
        {
#pragma omp task
            left = hash_many(data, first, first+n/2);
#pragma omp task
            right = hash_many(data, first+n/2, last);
        }
    }
    // parallelism ends
    Chunk parent(left.flags, left.key);
    parent.flags |= PARENT;
    // A parent's 64-byte block is the two children's chaining values.
    memcpy(parent.data, left.raw_hash, 32);
    memcpy(parent.data+8, right.raw_hash, 32);
    parent.compress_chunk();
    return parent;
}

// Build (but do not compress) a parent from two children; the caller
// compresses it, possibly with the ROOT flag.
Chunk merge(Chunk &left, Chunk &right) {
    // cout << "Called merge once\n";
    left.compress_chunk();
    right.compress_chunk();
    Chunk parent(left.flags, left.key);
    parent.flags |= PARENT;
    memcpy(parent.data, left.raw_hash, 32);
    memcpy(parent.data+8, right.raw_hash, 32);
    return parent;
}

// XOF-style output: re-compress the root with the ROOT flag and an
// incrementing counter, emitting 64 bytes per iteration into out_slice.
void hash_root(Chunk &node, vector<u8> &out_slice) {
    // the last message block must not be hashed like the others
    // it needs to be hashed with the root flag
    u64 output_block_counter = 0;
    u64 i=0, k=2*OUT_LEN;
    u32 words[16] = {};
    for(; int(out_slice.size()-i)>0; i+=k) {
        node.counter = output_block_counter;
        node.compress_chunk(ROOT);
        // words is u32[16]
        memcpy(words, node.raw_hash, 16*sizeof(*words));
        vector<u8> out_block(min(k, (u64)out_slice.size()-i));
        // Serialize words little-endian into the output block.
        for(u32 l=0; l<out_block.size(); l+=4) {
            for(u32 j=0; j<min(4U, (u32)out_block.size()-l); j++)
                out_block[l+j] = (words[l/4]>>(8*j)) & 0x000000FF;
        }
        for(u32 j=0; j<out_block.size(); j++)
            out_slice[i+j] = out_block[j];
        ++output_block_counter;
    }
}
O5Indirect3D.c
#include <mpi.h>
#include "grid.h"

/* Generated-code global variable descriptors: each carries a name, a
 * location tag, a dimensionality, and a 2-D or 3-D data pointer. */
extern struct {
    char *name;
    int loc;
    int dim;
    union {
        GVAL *restrict * restrict p2;
        GVAL *restrict * restrict * restrict p3;
    } data_pointer;
} *gv_temp;

extern struct {
    char *name;
    int loc;
    int dim;
    union {
        GVAL *restrict * restrict p2;
        GVAL *restrict * restrict * restrict p3;
    } data_pointer;
} *gv_ind3Dvar;

extern struct {
    char *name;
    int loc;
    int dim;
    union {
        int *restrict * restrict p2;
        int *restrict * restrict * restrict p3;
    } data_pointer;
} *t3DVer;

/* Zero-initialize the 3-D field gv_ind3Dvar over this rank's slice of
 * the edge blocks.  The min/max block expressions implement a static
 * block distribution of g->eBlkCnt blocks over g->mpi_world_size ranks
 * with blocks-per-rank = ceil(eBlkCnt / world_size); ranks before/after
 * the global range [0, eBlkCnt) get an empty [0, 0) slice.
 * NOTE(review): auto-generated code — the repeated ceil-division
 * ternaries are left exactly as emitted. */
void O5Indirect3D(GRID * g)
{
    {
        /* first local block: nonzero only on the rank owning global block 0 */
        size_t min_block = g->mpi_rank == (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0;
        /* one-past-last local block: full blocks-per-rank on interior ranks,
         * the remainder on the last owning rank, 0 on non-owning ranks */
        size_t max_block = g->mpi_rank < (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
        /* independent writes per (block, height, edge) triple: safe to
         * parallelize over the outer block loop */
#pragma omp parallel for
        for (size_t block_index = (min_block); block_index < (max_block); block_index++) {
            for (size_t height_index = (0); height_index < (g->height); height_index++) {
                for (size_t edge_index = (0); edge_index < (g->blkSize); edge_index++) {
                    gv_ind3Dvar->data_pointer.p3[(block_index)][(height_index)][(edge_index)] = 0;
                }
            }
        }
    }
}
DRB009-lastprivatemissing-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* This loop has loop-carried output-dependence due to x=... at line 59.
 * The problem can be solved by using lastprivate(x).
 * Data race pair: x@59 vs. x@59
 *
 * NOTE: this is a DataRaceBench test case ("-yes" = race present by
 * design).  The race on x is intentional and must NOT be fixed here;
 * it exists to exercise data-race detection tools. */
#include <stdio.h>
int main(int argc, char* argv[])
{
  int i,x;
  int len = 10000;

/* x is shared here: every iteration writes it concurrently (the race).
 * The intended-correct variant would use lastprivate(x). */
#pragma omp parallel for private (i)
  for (i=0;i<len;i++)
    x=i;

  /* With lastprivate(x) this would deterministically print len-1. */
  printf("x=%d",x);
  return 0;
}
maxpool_layer.c
#include "maxpool_layer.h"
#include "convolutional_layer.h"
#include "dark_cuda.h"
#include "utils.h"
#include "gemm.h"
#include <stdio.h>
#include <float.h>  /* FLT_MAX used by the forward passes */

/* Wrap the layer's output buffer as an image (no copy). */
image get_maxpool_image(maxpool_layer l)
{
    int h = l.out_h;
    int w = l.out_w;
    int c = l.c;
    return float_to_image(w,h,c,l.output);
}

/* Wrap the layer's delta buffer as an image (no copy). */
image get_maxpool_delta(maxpool_layer l)
{
    int h = l.out_h;
    int w = l.out_w;
    int c = l.c;
    return float_to_image(w,h,c,l.delta);
}

/* Allocate the cuDNN descriptors used by the pooling layer. */
void create_maxpool_cudnn_tensors(layer *l)
{
#ifdef CUDNN
    CHECK_CUDNN(cudnnCreatePoolingDescriptor(&l->poolingDesc));
    CHECK_CUDNN(cudnnCreateTensorDescriptor(&l->srcTensorDesc));
    CHECK_CUDNN(cudnnCreateTensorDescriptor(&l->dstTensorDesc));
#endif // CUDNN
}

/* Configure cuDNN descriptors for max pooling from the layer's geometry. */
void cudnn_maxpool_setup(layer *l)
{
#ifdef CUDNN
    CHECK_CUDNN(cudnnSetPooling2dDescriptor(
        l->poolingDesc,
        CUDNN_POOLING_MAX,
        CUDNN_NOT_PROPAGATE_NAN,    // CUDNN_PROPAGATE_NAN, CUDNN_NOT_PROPAGATE_NAN
        l->size,
        l->size,
        l->pad/2, //0, //l.pad,
        l->pad/2, //0, //l.pad,
        l->stride_x,
        l->stride_y));

    CHECK_CUDNN(cudnnSetTensor4dDescriptor(l->srcTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->c, l->h, l->w));
    CHECK_CUDNN(cudnnSetTensor4dDescriptor(l->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->out_c, l->out_h, l->out_w));
#endif // CUDNN
}

/* Configure cuDNN descriptors for local average pooling. */
void cudnn_local_avgpool_setup(layer *l)
{
#ifdef CUDNN
    CHECK_CUDNN(cudnnSetPooling2dDescriptor(
        l->poolingDesc,
        CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING,
        CUDNN_NOT_PROPAGATE_NAN,    // CUDNN_PROPAGATE_NAN, CUDNN_NOT_PROPAGATE_NAN
        l->size,
        l->size,
        l->pad / 2, //0, //l.pad,
        l->pad / 2, //0, //l.pad,
        l->stride_x,
        l->stride_y));

    CHECK_CUDNN(cudnnSetTensor4dDescriptor(l->srcTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->c, l->h, l->w));
    CHECK_CUDNN(cudnnSetTensor4dDescriptor(l->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->out_c, l->out_h, l->out_w));
#endif // CUDNN
}

/*
 * Construct a max-pooling (or local-average-pooling) layer.
 * maxpool_depth pools across channels instead of spatially; antialiasing
 * appends a fixed blur convolution and moves the stride into it.
 */
maxpool_layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride_x, int stride_y, int padding, int maxpool_depth, int out_channels, int antialiasing, int avgpool, int train)
{
    maxpool_layer l = { (LAYER_TYPE)0 };
    l.avgpool = avgpool;
    if (avgpool) l.type = LOCAL_AVGPOOL;
    else l.type = MAXPOOL;
    l.train = train;

    /* antialiasing pools at stride 1 and delegates the stride to a blur conv */
    const int blur_stride_x = stride_x;
    const int blur_stride_y = stride_y;
    l.antialiasing = antialiasing;
    if (antialiasing) {
        stride_x = stride_y = l.stride = l.stride_x = l.stride_y = 1; // use stride=1 in host-layer
    }

    l.batch = batch;
    l.h = h;
    l.w = w;
    l.c = c;
    l.pad = padding;
    l.maxpool_depth = maxpool_depth;
    l.out_channels = out_channels;
    if (maxpool_depth) {
        /* channel-wise pooling preserves spatial size */
        l.out_c = out_channels;
        l.out_w = l.w;
        l.out_h = l.h;
    }
    else {
        l.out_w = (w + padding - size) / stride_x + 1;
        l.out_h = (h + padding - size) / stride_y + 1;
        l.out_c = c;
    }
    l.outputs = l.out_h * l.out_w * l.out_c;
    l.inputs = h*w*c;
    l.size = size;
    l.stride = stride_x;
    l.stride_x = stride_x;
    l.stride_y = stride_y;
    int output_size = l.out_h * l.out_w * l.out_c * batch;

    if (train) {
        /* indexes records argmax positions for the backward pass */
        if (!avgpool) l.indexes = (int*)xcalloc(output_size, sizeof(int));
        l.delta = (float*)xcalloc(output_size, sizeof(float));
    }
    l.output = (float*)xcalloc(output_size, sizeof(float));
    if (avgpool) {
        l.forward = forward_local_avgpool_layer;
        l.backward = backward_local_avgpool_layer;
    }
    else {
        l.forward = forward_maxpool_layer;
        l.backward = backward_maxpool_layer;
    }
#ifdef GPU
    if (avgpool) {
        l.forward_gpu = forward_local_avgpool_layer_gpu;
        l.backward_gpu = backward_local_avgpool_layer_gpu;
    }
    else {
        l.forward_gpu = forward_maxpool_layer_gpu;
        l.backward_gpu = backward_maxpool_layer_gpu;
    }
    if (train) {
        if (!avgpool) l.indexes_gpu = cuda_make_int_array(output_size);
        l.delta_gpu = cuda_make_array(l.delta, output_size);
    }
    l.output_gpu = cuda_make_array(l.output, output_size);
    create_maxpool_cudnn_tensors(&l);
    if (avgpool) cudnn_local_avgpool_setup(&l);
    else cudnn_maxpool_setup(&l);
#endif  // GPU
    l.bflops = (l.size*l.size*l.c * l.out_h*l.out_w) / 1000000000.;
    if (avgpool) {
        if (stride_x == stride_y)
            fprintf(stderr, "avg %2dx%2d/%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BF\n", size, size, stride_x, w, h, c, l.out_w, l.out_h, l.out_c, l.bflops);
        else
            fprintf(stderr, "avg %2dx%2d/%2dx%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BF\n", size, size, stride_x, stride_y, w, h, c, l.out_w, l.out_h, l.out_c, l.bflops);
    }
    else {
        if (maxpool_depth)
            fprintf(stderr, "max-depth %2dx%2d/%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BF\n", size, size, stride_x, w, h, c, l.out_w, l.out_h, l.out_c, l.bflops);
        else if (stride_x == stride_y)
            fprintf(stderr, "max %2dx%2d/%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BF\n", size, size, stride_x, w, h, c, l.out_w, l.out_h, l.out_c, l.bflops);
        else
            fprintf(stderr, "max %2dx%2d/%2dx%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BF\n", size, size, stride_x, stride_y, w, h, c, l.out_w, l.out_h, l.out_c, l.bflops);
    }

    if (l.antialiasing) {
        /* fixed (non-trainable) blur conv: 2x2 box or 3x3 binomial kernel */
        printf("AA:  ");
        l.input_layer = (layer*)calloc(1, sizeof(layer));
        int blur_size = 3;
        int blur_pad = blur_size / 2;
        if (l.antialiasing == 2) {
            blur_size = 2;
            blur_pad = 0;
        }
        *(l.input_layer) = make_convolutional_layer(batch, 1, l.out_h, l.out_w, l.out_c, l.out_c, l.out_c, blur_size, blur_stride_x, blur_stride_y, 1, blur_pad, LINEAR, 0, 0, 0, 0, 0, 1, 0, NULL, 0, 0, train);
        const int blur_nweights = l.out_c * blur_size * blur_size;  // (n / n) * n * blur_size * blur_size;
        int i;
        if (blur_size == 2) {
            for (i = 0; i < blur_nweights; i += (blur_size*blur_size)) {
                l.input_layer->weights[i + 0] = 1 / 4.f;
                l.input_layer->weights[i + 1] = 1 / 4.f;
                l.input_layer->weights[i + 2] = 1 / 4.f;
                l.input_layer->weights[i + 3] = 1 / 4.f;
            }
        }
        else {
            for (i = 0; i < blur_nweights; i += (blur_size*blur_size)) {
                l.input_layer->weights[i + 0] = 1 / 16.f;
                l.input_layer->weights[i + 1] = 2 / 16.f;
                l.input_layer->weights[i + 2] = 1 / 16.f;

                l.input_layer->weights[i + 3] = 2 / 16.f;
                l.input_layer->weights[i + 4] = 4 / 16.f;
                l.input_layer->weights[i + 5] = 2 / 16.f;

                l.input_layer->weights[i + 6] = 1 / 16.f;
                l.input_layer->weights[i + 7] = 2 / 16.f;
                l.input_layer->weights[i + 8] = 1 / 16.f;
            }
        }
        for (i = 0; i < l.out_c; ++i) l.input_layer->biases[i] = 0;
#ifdef GPU
        if (gpu_index >= 0) {
            if (l.antialiasing) l.input_antialiasing_gpu = cuda_make_array(NULL, l.batch*l.outputs);
            push_convolutional_layer(*(l.input_layer));
        }
#endif  // GPU
    }
    return l;
}

/* Resize an existing pooling layer to a new input width/height,
 * reallocating the host and device buffers. */
void resize_maxpool_layer(maxpool_layer *l, int w, int h)
{
    l->h = h;
    l->w = w;
    l->inputs = h*w*l->c;

    l->out_w = (w + l->pad - l->size) / l->stride_x + 1;
    l->out_h = (h + l->pad - l->size) / l->stride_y + 1;
    l->outputs = l->out_w * l->out_h * l->out_c;
    int output_size = l->outputs * l->batch;

    if (l->train) {
        if (!l->avgpool) l->indexes = (int*)xrealloc(l->indexes, output_size * sizeof(int));
        l->delta = (float*)xrealloc(l->delta, output_size * sizeof(float));
    }
    l->output = (float*)xrealloc(l->output, output_size * sizeof(float));

#ifdef GPU
    CHECK_CUDA(cudaFree(l->output_gpu));
    l->output_gpu = cuda_make_array(l->output, output_size);

    if (l->train) {
        if (!l->avgpool) {
            CHECK_CUDA(cudaFree((float *)l->indexes_gpu));
            l->indexes_gpu = cuda_make_int_array(output_size);
        }
        CHECK_CUDA(cudaFree(l->delta_gpu));
        l->delta_gpu = cuda_make_array(l->delta, output_size);
    }

    if(l->avgpool) cudnn_local_avgpool_setup(l);
    else cudnn_maxpool_setup(l);
#endif
}

/*
 * Forward pass.  Three paths: channel-wise max (maxpool_depth), the AVX
 * fast path (inference with square stride), and the generic spatial loop.
 */
void forward_maxpool_layer(const maxpool_layer l, network_state state)
{
    if (l.maxpool_depth) {
        int b, i, j, k, g;
        for (b = 0; b < l.batch; ++b) {
            /* BUG FIX: j, g and k are declared outside the parallel
             * region and were shared between threads, racing across
             * rows.  They are per-iteration scratch -> private. */
            #pragma omp parallel for private(j, g, k)
            for (i = 0; i < l.h; ++i) {
                for (j = 0; j < l.w; ++j) {
                    for (g = 0; g < l.out_c; ++g) {
                        int out_index = j + l.w*(i + l.h*(g + l.out_c*b));
                        float max = -FLT_MAX;
                        int max_i = -1;

                        /* max over channels congruent to g mod out_c */
                        for (k = g; k < l.c; k += l.out_c) {
                            int in_index = j + l.w*(i + l.h*(k + l.c*b));
                            float val = state.input[in_index];

                            max_i = (val > max) ? in_index : max_i;
                            max = (val > max) ? val : max;
                        }
                        l.output[out_index] = max;
                        if (l.indexes) l.indexes[out_index] = max_i;
                    }
                }
            }
        }
        return;
    }

    if (!state.train && l.stride_x == l.stride_y) {
        /* vectorized inference-only path */
        forward_maxpool_layer_avx(state.input, l.output, l.indexes, l.size, l.w, l.h, l.out_w, l.out_h, l.c, l.pad, l.stride, l.batch);
    }
    else {
        int b, i, j, k, m, n;
        int w_offset = -l.pad / 2;
        int h_offset = -l.pad / 2;

        int h = l.out_h;
        int w = l.out_w;
        int c = l.c;

        for (b = 0; b < l.batch; ++b) {
            for (k = 0; k < c; ++k) {
                for (i = 0; i < h; ++i) {
                    for (j = 0; j < w; ++j) {
                        int out_index = j + w*(i + h*(k + c*b));
                        float max = -FLT_MAX;
                        int max_i = -1;
                        for (n = 0; n < l.size; ++n) {
                            for (m = 0; m < l.size; ++m) {
                                int cur_h = h_offset + i*l.stride_y + n;
                                int cur_w = w_offset + j*l.stride_x + m;
                                int index = cur_w + l.w*(cur_h + l.h*(k + b*l.c));
                                /* out-of-bounds taps contribute -FLT_MAX */
                                int valid = (cur_h >= 0 && cur_h < l.h &&
                                             cur_w >= 0 && cur_w < l.w);
                                float val = (valid != 0) ? state.input[index] : -FLT_MAX;
                                max_i = (val > max) ? index : max_i;
                                max = (val > max) ? val : max;
                            }
                        }
                        l.output[out_index] = max;
                        if (l.indexes) l.indexes[out_index] = max_i;
                    }
                }
            }
        }
    }

    if (l.antialiasing) {
        /* run the fixed blur conv over the pooled output */
        network_state s = { 0 };
        s.train = state.train;
        s.workspace = state.workspace;
        s.net = state.net;
        s.input = l.output;
        forward_convolutional_layer(*(l.input_layer), s);
        //simple_copy_ongpu(l.outputs*l.batch, l.output, l.input_antialiasing);
        memcpy(l.output, l.input_layer->output, l.input_layer->outputs * l.input_layer->batch * sizeof(float));
    }
}

/* Backward pass: route each output gradient to its argmax input.
 * NOTE(review): with overlapping pooling windows two outputs can share
 * an argmax index, making the += below a potential race — confirm
 * whether upstream relies on non-overlapping windows here. */
void backward_maxpool_layer(const maxpool_layer l, network_state state)
{
    int i;
    int h = l.out_h;
    int w = l.out_w;
    int c = l.out_c;
    #pragma omp parallel for
    for(i = 0; i < h*w*c*l.batch; ++i){
        int index = l.indexes[i];
        state.delta[index] += l.delta[i];
    }
}

/* Forward local average pooling: mean over the valid (in-bounds)
 * portion of each window. */
void forward_local_avgpool_layer(const maxpool_layer l, network_state state)
{
    int b, i, j, k, m, n;
    int w_offset = -l.pad / 2;
    int h_offset = -l.pad / 2;

    int h = l.out_h;
    int w = l.out_w;
    int c = l.c;

    for (b = 0; b < l.batch; ++b) {
        for (k = 0; k < c; ++k) {
            for (i = 0; i < h; ++i) {
                for (j = 0; j < w; ++j) {
                    int out_index = j + w*(i + h*(k + c*b));
                    float avg = 0;
                    int counter = 0;
                    for (n = 0; n < l.size; ++n) {
                        for (m = 0; m < l.size; ++m) {
                            int cur_h = h_offset + i*l.stride_y + n;
                            int cur_w = w_offset + j*l.stride_x + m;
                            int index = cur_w + l.w*(cur_h + l.h*(k + b*l.c));
                            int valid = (cur_h >= 0 && cur_h < l.h &&
                                         cur_w >= 0 && cur_w < l.w);
                            if (valid) {
                                counter++;
                                avg += state.input[index];
                            }
                        }
                    }
                    l.output[out_index] = avg / counter;
                }
            }
        }
    }
}

/* Backward local average pooling: spread each output gradient uniformly
 * over its window (divided by the full window area, matching upstream). */
void backward_local_avgpool_layer(const maxpool_layer l, network_state state)
{
    int b, i, j, k, m, n;
    int w_offset = -l.pad / 2;
    int h_offset = -l.pad / 2;

    int h = l.out_h;
    int w = l.out_w;
    int c = l.c;

    for (b = 0; b < l.batch; ++b) {
        for (k = 0; k < c; ++k) {
            for (i = 0; i < h; ++i) {
                for (j = 0; j < w; ++j) {
                    int out_index = j + w*(i + h*(k + c*b));
                    for (n = 0; n < l.size; ++n) {
                        for (m = 0; m < l.size; ++m) {
                            int cur_h = h_offset + i*l.stride_y + n;
                            int cur_w = w_offset + j*l.stride_x + m;
                            int index = cur_w + l.w*(cur_h + l.h*(k + b*l.c));
                            int valid = (cur_h >= 0 && cur_h < l.h &&
                                         cur_w >= 0 && cur_w < l.w);

                            if (valid) state.delta[index] += l.delta[out_index] / (l.size*l.size);
                        }
                    }
                }
            }
        }
    }
}
GB_unop__round_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).

// NOTE(review): this auto-generated span was flattened onto two physical
// lines by extraction; it has been re-flowed (restoring the backslash macro
// continuations) with only comments added -- all code tokens are unchanged.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:
// op(A)  function:  GB_unop_apply__round_fc32_fc32
// op(A') function:  GB_unop_tran__round_fc32_fc32
// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// cast:     GxB_FC32_t cij = aij
// unaryop:  cij = GB_croundf (aij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_croundf (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ; \
    Cx [pC] = GB_croundf (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ROUND || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__round_fc32_fc32
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // elementwise and independent per entry, so a static OMP schedule is used
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC32_t aij = Ax [p] ;
        GxB_FC32_t z = aij ;
        Cx [p] = GB_croundf (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__round_fc32_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is expanded from the shared template below,
    // specialized via the GB_* macros defined above
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__minv_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): this auto-generated span was flattened onto two physical
// lines by extraction; it has been re-flowed (restoring the backslash macro
// continuations) with only comments added -- all code tokens are unchanged.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:
// op(A)  function:  GB (_unop_apply__minv_fp32_fp32)
// op(A') function:  GB (_unop_tran__minv_fp32_fp32)
// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = (1.0F)/aij

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
// NOTE(review): MINV is a plain reciprocal with no zero guard; aij == 0
// yields IEEE infinity rather than an error.
#define GB_OP(z, x) \
    z = (1.0F)/x ;

// casting
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    float aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ; \
    Cx [pC] = (1.0F)/z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__minv_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry 0..anz-1 is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = (1.0F)/z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = (1.0F)/z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__minv_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is expanded from the shared template,
    // specialized via the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
IntegratorMCMMonoImplicit.h
// Copyright (c) 2009-2018 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.

// NOTE(review): this span was flattened by extraction and has been re-flowed
// with comments added; all code tokens are unchanged.  The last statement is
// deliberately left open because it continues on the next (unedited) line of
// the file, as does the body of update().

#ifndef __MCM_MONO_IMPLICIT__H__
#define __MCM_MONO_IMPLICIT__H__

#include "IntegratorMCMMono.h"
#include "hoomd/Autotuner.h"

#include <random>
#include <cfloat>

#ifdef _OPENMP
#include <omp.h>
#endif

/*! \file IntegratorMCMMonoImplicit.h
    \brief Defines the template class for MCM with implicit generated depletant solvent
    \note This header cannot be compiled by nvcc
*/

#ifdef NVCC
#error This header cannot be compiled by nvcc
#endif

#include <hoomd/extern/pybind/include/pybind11/pybind11.h>

namespace mcm
{

//! Template class for MCM update with implicit depletants
/*! Depletants are generated randomly on the fly according to the semi-grand canonical ensemble.
    The penetrable depletants model is simulated.

    \ingroup mcm_integrators
*/
template< class Shape > class IntegratorMCMMonoImplicit : public IntegratorMCMMono<Shape>
    {
    public:
        //! Construct the integrator
        IntegratorMCMMonoImplicit(std::shared_ptr<SystemDefinition> sysdef, unsigned int seed);
        //! Destructor
        virtual ~IntegratorMCMMonoImplicit();

        //! Set the depletant density in the free volume
        // (also flags the Poisson parameters for lazy rebuild on the next update)
        void setDepletantDensity(Scalar n_R) { m_n_R = n_R; m_need_initialize_poisson = true; }

        //! Set the type of depletant particle
        void setDepletantType(unsigned int type) { m_type = type; }

        //! Number of depletant-reinsertions
        /*! \param n_trial Depletant reinsertions per overlapping depletant */
        void setNTrial(unsigned int n_trial) { m_n_trial = n_trial; }

        //! Return number of depletant re-insertions
        unsigned int getNTrial() { return m_n_trial; }

        //! Returns the depletant density
        Scalar getDepletantDensity() { return m_n_R; }

        //! Return the depletant type
        unsigned int getDepletantType() { return m_type; }

        //! Return the number of re-insertion trials
        unsigned int getNumTrials() const { return m_n_trial; }

        //! Reset statistics counters
        virtual void resetStats()
            {
            IntegratorMCMMono<Shape>::resetStats();
            ArrayHandle<mcm_implicit_counters_t> h_counters(m_implicit_count, access_location::host, access_mode::read);
            // snapshot so per-run rates can be reported relative to run start
            m_implicit_count_run_start = h_counters.data[0];
            }

        //! Print statistics about the mcm steps taken
        virtual void printStats()
            {
            IntegratorMCMMono<Shape>::printStats();
            mcm_implicit_counters_t result = getImplicitCounters(1);
            double cur_time = double(this->m_clock.getTime()) / Scalar(1e9);
            this->m_exec_conf->msg->notice(2) << "-- Implicit depletants stats:" << "\n";
            this->m_exec_conf->msg->notice(2) << "Depletant insertions per second: " << double(result.insert_count)/cur_time << "\n";
            this->m_exec_conf->msg->notice(2) << "Configurational bias attempts per second: " << double(result.reinsert_count)/cur_time << "\n";
            this->m_exec_conf->msg->notice(2) << "Fraction of depletants in free volume: " << result.getFreeVolumeFraction() << "\n";
            this->m_exec_conf->msg->notice(2) << "Fraction of overlapping depletants: " << result.getOverlapFraction()<< "\n";
            }

        //! Get the current counter values
        mcm_implicit_counters_t getImplicitCounters(unsigned int mode=0);

        /* \returns a list of provided quantities
        */
        std::vector< std::string > getProvidedLogQuantities()
            {
            // start with the integrator provided quantities
            std::vector< std::string > result = IntegratorMCMMono<Shape>::getProvidedLogQuantities();
            // then add ours
            result.push_back("mcm_fugacity");
            result.push_back("mcm_ntrial");
            result.push_back("mcm_insert_count");
            result.push_back("mcm_reinsert_count");
            result.push_back("mcm_free_volume_fraction");
            result.push_back("mcm_overlap_fraction");
            result.push_back("mcm_configurational_bias_ratio");
            return result;
            }

        //! Get the value of a logged quantity
        virtual Scalar getLogValue(const std::string& quantity, unsigned int timestep);

        //! Method to scale the box
        virtual bool attemptBoxResize(unsigned int timestep, const BoxDim& new_box);

        //! Slot to be called when number of types changes
        void slotNumTypesChange();

    protected:
        Scalar m_n_R;                                          //!< Average depletant number density in free volume
        unsigned int m_type;                                   //!< Type of depletant particle to generate
        GPUArray<mcm_implicit_counters_t> m_implicit_count;    //!< Counter of active cell cluster moves
        mcm_implicit_counters_t m_implicit_count_run_start;    //!< Counter of active cell cluster moves at run start
        mcm_implicit_counters_t m_implicit_count_step_start;   //!< Counter of active cell cluster moves at run start

        std::vector<std::poisson_distribution<unsigned int> > m_poisson;  //!< Poisson distribution
        std::vector<Scalar> m_lambda;                          //!< Poisson distribution parameters per type
        Scalar m_d_dep;                                        //!< Depletant circumsphere diameter
        GPUArray<Scalar> m_d_min;                              //!< Minimum sphere from which test depletant is excluded
        GPUArray<Scalar> m_d_max;                              //!< Maximum sphere for test depletant insertion

        std::vector<hoomd::detail::Saru> m_rng_depletant;      //!< RNGs for depletant insertion
        bool m_rng_initialized;                                //!< True if RNGs have been initialized

        unsigned int m_n_trial;                                //!< Number of trial re-insertions per depletant
        bool m_need_initialize_poisson;                        //!< Flag to tell if we need to initialize the poisson distribution

        //! Take one timestep forward
        virtual void update(unsigned int timestep);

        //! Initalize Poisson distribution parameters
        virtual void updatePoissonParameters();

        //! Initialize the Poisson distributions
        virtual void initializePoissonDistribution();

        //! Set the nominal width appropriate for depletion interaction
        virtual void updateCellWidth();

        //! Generate a random depletant position in a sphere around a particle
        template<class RNG>
        inline void generateDepletant(RNG& rng, vec3<Scalar> pos_sphere, Scalar delta, Scalar d_min, vec3<Scalar>& pos, quat<Scalar>& orientation, const typename Shape::param_type& params_depletants);

        /*! Generate a random depletant position in a region including the sphere around a particle,
            restricted so that it does not intersect another sphere
        */
        template<class RNG>
        inline void generateDepletantRestricted(RNG& rng, vec3<Scalar> pos_sphere, Scalar delta, Scalar delta_other, vec3<Scalar>& pos, quat<Scalar>& orientation, const typename Shape::param_type& params_depletants, vec3<Scalar> pos_sphere_other);

        //! Try inserting a depletant in a configuration such that it overlaps with the particle in the old (new) configuration
        inline bool insertDepletant(vec3<Scalar>& pos_depletant, const Shape& shape_depletant, unsigned int idx, typename Shape::param_type *params, unsigned int *h_overlaps, unsigned int typ_i, Scalar4 *h_postype, Scalar4 *h_orientation, vec3<Scalar> pos_new, quat<Scalar>& orientation_new, const typename Shape::param_type& params_new, unsigned int &overlap_checks, unsigned int &overlap_err_count, bool &overlap_shape, bool new_config);
    };

/*! \param sysdef System definition
    \param cl Cell list
    \param seed Random number generator seed

    NOTE: only 3d supported at this time
*/
template< class Shape >
IntegratorMCMMonoImplicit< Shape >::IntegratorMCMMonoImplicit(std::shared_ptr<SystemDefinition> sysdef, unsigned int seed)
    : IntegratorMCMMono<Shape>(sysdef, seed), m_n_R(0), m_type(0), m_d_dep(0.0), m_rng_initialized(false), m_n_trial(0), m_need_initialize_poisson(true)
    {
    this->m_exec_conf->msg->notice(5) << "Constructing IntegratorMCMImplicit" << std::endl;

    GPUArray<mcm_implicit_counters_t> implicit_count(1,this->m_exec_conf);
    m_implicit_count.swap(implicit_count);

    // per-type insertion-sphere bounds; filled in by updatePoissonParameters()
    GPUArray<Scalar> d_min(this->m_pdata->getNTypes(), this->m_exec_conf);
    m_d_min.swap(d_min);
    GPUArray<Scalar> d_max(this->m_pdata->getNTypes(), this->m_exec_conf);
    m_d_max.swap(d_max);

    m_lambda.resize(this->m_pdata->getNTypes(),FLT_MAX);
    }

//! Destructor
template< class Shape >
IntegratorMCMMonoImplicit< Shape >::~IntegratorMCMMonoImplicit()
    {
    }

template <class Shape>
void IntegratorMCMMonoImplicit<Shape>::slotNumTypesChange()
    {
    // call parent class method
    IntegratorMCMMono<Shape>::slotNumTypesChange();

    m_lambda.resize(this->m_pdata->getNTypes(),FLT_MAX);

    // re-size the per-type bound arrays to the new type count
    GPUArray<Scalar> d_min(this->m_pdata->getNTypes(), this->m_exec_conf);
    m_d_min.swap(d_min);
    GPUArray<Scalar> d_max(this->m_pdata->getNTypes(), this->m_exec_conf);
    m_d_max.swap(d_max);

    m_need_initialize_poisson = true;
    }

template< class Shape >
void IntegratorMCMMonoImplicit< Shape >::updatePoissonParameters()
    {
    // Depletant diameter
    quat<Scalar> o;
    Shape shape_depletant(o, this->m_params[this->m_type]);
    m_d_dep = shape_depletant.getCircumsphereDiameter();

    // access GPUArrays
    ArrayHandle<Scalar> h_d_min(m_d_min, access_location::host, access_mode::overwrite);
    ArrayHandle<Scalar> h_d_max(m_d_max, access_location::host, access_mode::overwrite);

    for (unsigned int i_type = 0; i_type < this->m_pdata->getNTypes(); ++i_type)
        {
        // test sphere diameter and volume
        Shape shape_i(quat<Scalar>(), this->m_params[i_type]);

        Scalar delta = shape_i.getCircumsphereDiameter()+m_d_dep;
        h_d_max.data[i_type] = delta;

        // volume of insertion sphere
        Scalar V = Scalar(M_PI/6.0)*delta*delta*delta;

        // Minimum diameter of colloid sphere in which depletant can be inserted without overlapping with other colloids
        // Scalar d = std::max(Scalar(2.0)*shape_i.getInsphereRadius()-m_d_dep,0.0);
        Scalar d = Scalar(0.0);
        h_d_min.data[i_type] = d;

        // subtract inner sphere from sampling volume
        V -= Scalar(M_PI/6.0)*d*d*d;

        // average number of depletants in volume
        m_lambda[i_type] = this->m_n_R*V;
        }
    }

template<class Shape>
void IntegratorMCMMonoImplicit< Shape >::initializePoissonDistribution()
    {
    m_poisson.resize(this->m_pdata->getNTypes());

    for (unsigned int i_type = 0; i_type < this->m_pdata->getNTypes(); ++i_type)
        {
        // parameter for Poisson distribution
        Scalar lambda = m_lambda[i_type];
        if (lambda <= Scalar(0.0))
            {
            // guard against invalid parameters
            continue;
            }
        m_poisson[i_type] = std::poisson_distribution<unsigned int>(lambda);
        }
    }

template< class Shape >
void IntegratorMCMMonoImplicit< Shape >::updateCellWidth()
    {
    this->m_nominal_width = this->getMaxCoreDiameter();

    if (m_n_R > Scalar(0.0))
        {
        // add range of depletion interaction
        quat<Scalar> o;
        Shape tmp(o, this->m_params[m_type]);
        this->m_nominal_width += tmp.getCircumsphereDiameter();

        // update image list range
        this->m_extra_image_width = tmp.getCircumsphereDiameter();
        }

    // Account for patch width
    if (this->m_patch)
        {
        Scalar max_extent = 0.0;
        for (unsigned int typ = 0; typ < this->m_pdata->getNTypes(); typ++)
            {
            max_extent = std::max(max_extent, this->m_patch->getAdditiveCutoff(typ));
            }
        this->m_nominal_width = std::max(this->m_nominal_width, this->m_patch->getRCut() + max_extent);
        }

    this->m_exec_conf->msg->notice(5) << "IntegratorMCMMonoImplicit: updating nominal width to " << this->m_nominal_width << std::endl;
    }

// NOTE(review): update() continues beyond this span; the head below is
// reproduced verbatim and its final statement completes on the next line.
template< class Shape >
void IntegratorMCMMonoImplicit< Shape >::update(unsigned int timestep)
    {
    this->m_exec_conf->msg->notice(10) << "MCMMonoImplicit update: " << timestep << std::endl;
    IntegratorMCM::update(timestep);

    // update poisson distributions
    if (m_need_initialize_poisson)
        {
        updatePoissonParameters();
        initializePoissonDistribution();
        m_need_initialize_poisson = false;
        }

    if (!m_rng_initialized)
        {
        unsigned int n_omp_threads = 1;
        #ifdef _OPENMP
        n_omp_threads = omp_get_max_threads();
        #endif
        // initialize a set of random number generators
        for (unsigned int i = 0; i < n_omp_threads; ++i)
            {
            m_rng_depletant.push_back(hoomd::detail::Saru(timestep,this->m_seed+this->m_exec_conf->getRank(), i));
            }
        m_rng_initialized = true;
        }

    // get needed vars
    ArrayHandle<mcm_counters_t> h_counters(this->m_count_total, access_location::host, access_mode::readwrite);
    mcm_counters_t& counters = h_counters.data[0];
    ArrayHandle<mcm_implicit_counters_t> h_implicit_counters(m_implicit_count,
access_location::host, access_mode::readwrite); mcm_implicit_counters_t& implicit_counters = h_implicit_counters.data[0]; m_implicit_count_step_start = implicit_counters; const BoxDim& box = this->m_pdata->getBox(); unsigned int ndim = this->m_sysdef->getNDimensions(); #ifdef ENABLE_MPI // compute the width of the active region Scalar3 npd = box.getNearestPlaneDistance(); Scalar3 ghost_fraction = this->m_nominal_width / npd; #endif // Shuffle the order of particles for this step this->m_update_order.resize(this->m_pdata->getN()); this->m_update_order.shuffle(timestep); // update the AABB Tree this->buildAABBTree(); // limit m_d entries so that particles cannot possibly wander more than one box image in one time step this->limitMoveDistances(); // update the image list this->updateImageList(); // combine the three seeds std::vector<unsigned int> seed_seq(3); seed_seq[0] = this->m_seed; seed_seq[1] = timestep; seed_seq[2] = this->m_exec_conf->getRank(); std::seed_seq seed(seed_seq.begin(), seed_seq.end()); // RNG for poisson distribution std::mt19937 rng_poisson(seed); if (this->m_prof) this->m_prof->push(this->m_exec_conf, "MCM implicit"); // access depletant insertion sphere dimensions ArrayHandle<Scalar> h_d_min(m_d_min, access_location::host, access_mode::read); ArrayHandle<Scalar> h_d_max(m_d_max, access_location::host, access_mode::read); // loop over local particles nselect times for (unsigned int i_nselect = 0; i_nselect < this->m_nselect; i_nselect++) { // access particle data and system box ArrayHandle<Scalar4> h_postype(this->m_pdata->getPositions(), access_location::host, access_mode::readwrite); ArrayHandle<Scalar4> h_orientation(this->m_pdata->getOrientationArray(), access_location::host, access_mode::readwrite); ArrayHandle<Scalar> h_diameter(this->m_pdata->getDiameters(), access_location::host, access_mode::read); ArrayHandle<Scalar> h_charge(this->m_pdata->getCharges(), access_location::host, access_mode::read); // access interaction matrix 
ArrayHandle<unsigned int> h_overlaps(this->m_overlaps, access_location::host, access_mode::read); //access move sizes ArrayHandle<Scalar> h_d(this->m_d, access_location::host, access_mode::read); ArrayHandle<Scalar> h_a(this->m_a, access_location::host, access_mode::read); // loop through N particles in a shuffled order for (unsigned int cur_particle = 0; cur_particle < this->m_pdata->getN(); cur_particle++) { unsigned int i = this->m_update_order[cur_particle]; // read in the current position and orientation Scalar4 postype_i = h_postype.data[i]; Scalar4 orientation_i = h_orientation.data[i]; vec3<Scalar> pos_i = vec3<Scalar>(postype_i); #ifdef ENABLE_MPI if (this->m_comm) { // only move particle if active if (!isActive(make_scalar3(postype_i.x, postype_i.y, postype_i.z), box, ghost_fraction)) continue; } #endif // make a trial move for i hoomd::detail::Saru rng_i(i, this->m_seed + this->m_exec_conf->getRank()*this->m_nselect + i_nselect, timestep); int typ_i = __scalar_as_int(postype_i.w); Shape shape_i(quat<Scalar>(orientation_i), this->m_params[typ_i]); unsigned int move_type_select = rng_i.u32() & 0xffff; bool move_type_translate = !shape_i.hasOrientation() || (move_type_select < this->m_move_ratio); Shape shape_old(quat<Scalar>(orientation_i), this->m_params[typ_i]); vec3<Scalar> pos_old = pos_i; if (move_type_translate) { move_translate(pos_i, rng_i, h_d.data[typ_i], ndim); #ifdef ENABLE_MPI if (this->m_comm) { // check if particle has moved into the ghost layer, and skip if it is if (!isActive(vec_to_scalar3(pos_i), box, ghost_fraction)) continue; } #endif } else { move_rotate(shape_i.orientation, rng_i, h_a.data[typ_i], ndim); } // check for overlaps with neighboring particle's positions bool overlap=false; OverlapReal r_cut_patch = 0; if (this->m_patch && !this->m_patch_log) { r_cut_patch = this->m_patch->getRCut() + 0.5*this->m_patch->getAdditiveCutoff(typ_i); } OverlapReal R_query = std::max(shape_i.getCircumsphereDiameter()/OverlapReal(2.0), 
r_cut_patch-this->getMinCoreDiameter()/(OverlapReal)2.0); detail::AABB aabb_i_local = detail::AABB(vec3<Scalar>(0,0,0),R_query); // patch + field interaction deltaU double patch_field_energy_diff = 0; // All image boxes (including the primary) const unsigned int n_images = this->m_image_list.size(); for (unsigned int cur_image = 0; cur_image < n_images; cur_image++) { vec3<Scalar> pos_i_image = pos_i + this->m_image_list[cur_image]; detail::AABB aabb = aabb_i_local; aabb.translate(pos_i_image); // stackless search for (unsigned int cur_node_idx = 0; cur_node_idx < this->m_aabb_tree.getNumNodes(); cur_node_idx++) { if (detail::overlap(this->m_aabb_tree.getNodeAABB(cur_node_idx), aabb)) { if (this->m_aabb_tree.isNodeLeaf(cur_node_idx)) { for (unsigned int cur_p = 0; cur_p < this->m_aabb_tree.getNodeNumParticles(cur_node_idx); cur_p++) { // read in its position and orientation unsigned int j = this->m_aabb_tree.getNodeParticle(cur_node_idx, cur_p); Scalar4 postype_j; Scalar4 orientation_j; // handle j==i situations if ( j != i ) { // load the position and orientation of the j particle postype_j = h_postype.data[j]; orientation_j = h_orientation.data[j]; } else { if (cur_image == 0) { // in the first image, skip i == j continue; } else { // If this is particle i and we are in an outside image, use the translated position and orientation postype_j = make_scalar4(pos_i.x, pos_i.y, pos_i.z, postype_i.w); orientation_j = quat_to_scalar4(shape_i.orientation); } } // put particles in coordinate system of particle i vec3<Scalar> r_ij = vec3<Scalar>(postype_j) - pos_i_image; unsigned int typ_j = __scalar_as_int(postype_j.w); Shape shape_j(quat<Scalar>(orientation_j), this->m_params[typ_j]); counters.overlap_checks++; // check circumsphere overlap OverlapReal rsq = dot(r_ij,r_ij); OverlapReal DaDb = shape_i.getCircumsphereDiameter() + shape_j.getCircumsphereDiameter(); bool circumsphere_overlap = (rsq*OverlapReal(4.0) <= DaDb * DaDb); Scalar r_cut_ij = 0.0; if (this->m_patch) 
r_cut_ij = r_cut_patch + 0.5*this->m_patch->getAdditiveCutoff(typ_j); if (h_overlaps.data[this->m_overlap_idx(typ_i,typ_j)] && circumsphere_overlap && test_overlap(r_ij, shape_i, shape_j, counters.overlap_err_count)) { overlap = true; break; } // If there is no overlap and m_patch is not NULL, calculate energy else if (this->m_patch && !this->m_patch_log && rsq <= r_cut_ij*r_cut_ij) { patch_field_energy_diff -= this->m_patch->energy(r_ij, typ_i, quat<float>(shape_i.orientation), h_diameter.data[i], h_charge.data[i], typ_j, quat<float>(orientation_j), h_diameter.data[j], h_charge.data[j] ); } } } } else { // skip ahead cur_node_idx += this->m_aabb_tree.getNodeSkip(cur_node_idx); } if (overlap) break; } // end loop over AABB nodes if (overlap) break; } // end loop over images // whether the move is accepted bool accept = !overlap; // In most cases checking patch energy should be cheaper than computing // depletants, so do that first. Calculate old patch energy only if // m_patch not NULL and no overlaps. 
Note that we are computing U_old-U_new // and then exponentiating directly (rather than exp(-(U_new-U_old))) if (this->m_patch && !this->m_patch_log && accept) { for (unsigned int cur_image = 0; cur_image < n_images; cur_image++) { vec3<Scalar> pos_i_image = pos_old + this->m_image_list[cur_image]; detail::AABB aabb = aabb_i_local; aabb.translate(pos_i_image); // stackless search for (unsigned int cur_node_idx = 0; cur_node_idx < this->m_aabb_tree.getNumNodes(); cur_node_idx++) { if (detail::overlap(this->m_aabb_tree.getNodeAABB(cur_node_idx), aabb)) { if (this->m_aabb_tree.isNodeLeaf(cur_node_idx)) { for (unsigned int cur_p = 0; cur_p < this->m_aabb_tree.getNodeNumParticles(cur_node_idx); cur_p++) { // read in its position and orientation unsigned int j = this->m_aabb_tree.getNodeParticle(cur_node_idx, cur_p); Scalar4 postype_j; Scalar4 orientation_j; // handle j==i situations if ( j != i ) { // load the position and orientation of the j particle postype_j = h_postype.data[j]; orientation_j = h_orientation.data[j]; } else { if (cur_image == 0) { // in the first image, skip i == j continue; } else { // If this is particle i and we are in an outside image, use the translated position and orientation postype_j = make_scalar4(pos_old.x, pos_old.y, pos_old.z, postype_i.w); orientation_j = quat_to_scalar4(shape_old.orientation); } } // put particles in coordinate system of particle i vec3<Scalar> r_ij = vec3<Scalar>(postype_j) - pos_i_image; unsigned int typ_j = __scalar_as_int(postype_j.w); Shape shape_j(quat<Scalar>(orientation_j), this->m_params[typ_j]); if (dot(r_ij,r_ij) <= r_cut_patch*r_cut_patch) patch_field_energy_diff += this->m_patch->energy(r_ij, typ_i, quat<float>(orientation_i), h_diameter.data[i], h_charge.data[i], typ_j, quat<float>(orientation_j), h_diameter.data[j], h_charge.data[j]); } } } else { // skip ahead cur_node_idx += this->m_aabb_tree.getNodeSkip(cur_node_idx); } } // end loop over AABB nodes } // end loop over images // Add external energetic 
contribution if (this->m_external) { patch_field_energy_diff -= this->m_external->energydiff(i, pos_old, shape_old, pos_i, shape_i); } // Update acceptance based on patch, will only be reached if overlap check succeeded accept = rng_i.d() < slow::exp(patch_field_energy_diff); } // end if (m_patch) // Depletant check if (accept) { // log of acceptance probability Scalar lnb(0.0); unsigned int zero = 0; // The trial move is valid. Now generate random depletant particles in a sphere // of radius (d_max+d_depletant+move size)/2.0 around the original particle position // draw number from Poisson distribution unsigned int n = 0; if (m_lambda[typ_i] > Scalar(0.0)) { n = m_poisson[typ_i](rng_poisson); } unsigned int n_overlap_checks = 0; unsigned int overlap_err_count = 0; unsigned int insert_count = 0; unsigned int reinsert_count = 0; unsigned int free_volume_count = 0; unsigned int overlap_count = 0; volatile bool flag=false; #pragma omp parallel for reduction(+ : lnb, n_overlap_checks, overlap_err_count, insert_count, reinsert_count, free_volume_count, overlap_count) reduction(max: zero) shared(flag) if (n>0) schedule(dynamic) for (unsigned int k = 0; k < n; ++k) { if (flag) { #ifndef _OPENMP break; #else continue; #endif } insert_count++; // generate a random depletant coordinate and orientation in the sphere around the new position vec3<Scalar> pos_test; quat<Scalar> orientation_test; #ifdef _OPENMP unsigned int thread_idx = omp_get_thread_num(); #else unsigned int thread_idx = 0; #endif generateDepletant(m_rng_depletant[thread_idx], pos_i, h_d_max.data[typ_i], h_d_min.data[typ_i], pos_test, orientation_test, this->m_params[m_type]); Shape shape_test(orientation_test, this->m_params[m_type]); detail::AABB aabb_test_local = shape_test.getAABB(vec3<Scalar>(0,0,0)); bool overlap_depletant = false; // Check if the new configuration of particle i generates an overlap for (unsigned int cur_image = 0; cur_image < n_images; cur_image++) { vec3<Scalar> pos_test_image = 
pos_test + this->m_image_list[cur_image]; detail::AABB aabb = aabb_test_local; aabb.translate(pos_test_image); vec3<Scalar> r_ij = pos_i - pos_test_image; n_overlap_checks++; // check circumsphere overlap OverlapReal rsq = dot(r_ij,r_ij); OverlapReal DaDb = shape_test.getCircumsphereDiameter() + shape_i.getCircumsphereDiameter(); bool circumsphere_overlap = (rsq*OverlapReal(4.0) <= DaDb * DaDb); if (h_overlaps.data[this->m_overlap_idx(m_type, typ_i)] && circumsphere_overlap && test_overlap(r_ij, shape_test, shape_i, overlap_err_count)) { overlap_depletant = true; overlap_count++; break; } } if (overlap_depletant) { // check against overlap with old position bool overlap_old = false; // Check if the old configuration of particle i generates an overlap for (unsigned int cur_image = 0; cur_image < n_images; cur_image++) { vec3<Scalar> pos_test_image = pos_test + this->m_image_list[cur_image]; vec3<Scalar> r_ij = vec3<Scalar>(h_postype.data[i]) - pos_test_image; n_overlap_checks++; // check circumsphere overlap Shape shape_i_old(quat<Scalar>(h_orientation.data[i]), this->m_params[typ_i]); OverlapReal rsq = dot(r_ij,r_ij); OverlapReal DaDb = shape_test.getCircumsphereDiameter() + shape_i_old.getCircumsphereDiameter(); bool circumsphere_overlap = (rsq*OverlapReal(4.0) <= DaDb * DaDb); if (h_overlaps.data[this->m_overlap_idx(m_type, typ_i)] && circumsphere_overlap && test_overlap(r_ij, shape_test, shape_i_old, overlap_err_count)) { overlap_old = true; break; } } if (!overlap_old) { // All image boxes (including the primary) const unsigned int n_images = this->m_image_list.size(); for (unsigned int cur_image = 0; cur_image < n_images; cur_image++) { vec3<Scalar> pos_test_image = pos_test + this->m_image_list[cur_image]; detail::AABB aabb = aabb_test_local; aabb.translate(pos_test_image); // stackless search for (unsigned int cur_node_idx = 0; cur_node_idx < this->m_aabb_tree.getNumNodes(); cur_node_idx++) { if (detail::overlap(this->m_aabb_tree.getNodeAABB(cur_node_idx), 
aabb)) { if (this->m_aabb_tree.isNodeLeaf(cur_node_idx)) { for (unsigned int cur_p = 0; cur_p < this->m_aabb_tree.getNodeNumParticles(cur_node_idx); cur_p++) { // read in its position and orientation unsigned int j = this->m_aabb_tree.getNodeParticle(cur_node_idx, cur_p); // we checked ptl i first if (i == j) continue; Scalar4 postype_j; Scalar4 orientation_j; // load the old position and orientation of the j particle postype_j = h_postype.data[j]; orientation_j = h_orientation.data[j]; // put particles in coordinate system of particle i vec3<Scalar> r_ij = vec3<Scalar>(postype_j) - pos_test_image; unsigned int typ_j = __scalar_as_int(postype_j.w); Shape shape_j(quat<Scalar>(orientation_j), this->m_params[typ_j]); n_overlap_checks++; // check circumsphere overlap OverlapReal rsq = dot(r_ij,r_ij); OverlapReal DaDb = shape_test.getCircumsphereDiameter() + shape_j.getCircumsphereDiameter(); bool circumsphere_overlap = (rsq*OverlapReal(4.0) <= DaDb * DaDb); if (h_overlaps.data[this->m_overlap_idx(m_type,typ_j)] && circumsphere_overlap && test_overlap(r_ij, shape_test, shape_j, overlap_err_count)) { // depletant is ignored for any overlap in the old configuration overlap_old = true; break; } } } } else { // skip ahead cur_node_idx += this->m_aabb_tree.getNodeSkip(cur_node_idx); } if (overlap_old) break; } // end loop over AABB nodes if (overlap_old) break; } // end loop over images } if (!overlap_old) { free_volume_count++; } else { // the depletant overlap doesn't count since it was already overlapping // in the old configuration overlap_depletant = false; } } if (overlap_depletant && !m_n_trial) { zero = 1; // break out of loop flag = true; } else if (overlap_depletant && m_n_trial) { const typename Shape::param_type& params_depletant = this->m_params[m_type]; // Number of successful depletant insertions in new configuration unsigned int n_success_new = 0; // Number of allowed insertion trials (those which overlap with colloid at old position) unsigned int 
n_overlap_shape_new = 0; // diameter (around origin) in which we are guaruanteed to intersect with the shape Scalar delta_insphere = Scalar(2.0)*shape_i.getInsphereRadius(); // same for old reverse move. Because we have already sampled one successful insertion // that overlaps with the colloid at the new position, we increment by one (super-detailed // balance) unsigned int n_success_old = 1; unsigned int n_overlap_shape_old = 1; Scalar4& postype_i_old = h_postype.data[i]; vec3<Scalar> pos_i_old(postype_i_old); quat<Scalar> orientation_i_old(h_orientation.data[i]); for (unsigned int l = 0; l < m_n_trial; ++l) { // generate a random depletant position and orientation // in both the old and the new configuration of the colloid particle vec3<Scalar> pos_depletant_old, pos_depletant_new; quat<Scalar> orientation_depletant_old, orientation_depletant_new; // try moving the overlapping depletant in the excluded volume // such that it overlaps with the particle at the old position generateDepletantRestricted(m_rng_depletant[thread_idx], pos_i_old, h_d_max.data[typ_i], delta_insphere, pos_depletant_new, orientation_depletant_new, params_depletant, pos_i); reinsert_count++; Shape shape_depletant_new(orientation_depletant_new, params_depletant); const typename Shape::param_type& params_i = this->m_params[__scalar_as_int(postype_i_old.w)]; bool overlap_shape = false; if (insertDepletant(pos_depletant_new, shape_depletant_new, i, this->m_params.data(), h_overlaps.data, typ_i, h_postype.data, h_orientation.data, pos_i, shape_i.orientation, params_i, n_overlap_checks, overlap_err_count, overlap_shape, false)) { n_success_new++; } if (overlap_shape) { // depletant overlaps with colloid at old position n_overlap_shape_new++; } if (l >= 1) { // as above, in excluded volume sphere at new position generateDepletantRestricted(m_rng_depletant[thread_idx], pos_i, h_d_max.data[typ_i], delta_insphere, pos_depletant_old, orientation_depletant_old, params_depletant, pos_i_old); Shape 
shape_depletant_old(orientation_depletant_old, params_depletant); if (insertDepletant(pos_depletant_old, shape_depletant_old, i, this->m_params.data(), h_overlaps.data, typ_i, h_postype.data, h_orientation.data, pos_i, shape_i.orientation, params_i, n_overlap_checks, overlap_err_count, overlap_shape, true)) { n_success_old++; } if (overlap_shape) { // depletant overlaps with colloid at new position n_overlap_shape_old++; } reinsert_count++; } n_overlap_checks += counters.overlap_checks; overlap_err_count += counters.overlap_err_count; } // end loop over re-insertion attempts if (n_success_new != 0) { lnb += log((Scalar)n_success_new/(Scalar)n_overlap_shape_new); lnb -= log((Scalar)n_success_old/(Scalar)n_overlap_shape_old); } else { zero = 1; // break out of loop flag = true; } } // end if depletant overlap } // end loop over depletants // increment counters counters.overlap_checks += n_overlap_checks; counters.overlap_err_count += overlap_err_count; implicit_counters.insert_count += insert_count; implicit_counters.free_volume_count += free_volume_count; implicit_counters.overlap_count += overlap_count; implicit_counters.reinsert_count += reinsert_count; // apply acceptance criterium if (!zero) { accept = rng_i.f() < exp(lnb); } else { accept = false; } } // end depletant placement // if the move is accepted if (accept) { // increment accept counter and assign new position if (!shape_i.ignoreStatistics()) { if (move_type_translate) counters.translate_accept_count++; else counters.rotate_accept_count++; } // update the position of the particle in the tree for future updates detail::AABB aabb = aabb_i_local; aabb.translate(pos_i); this->m_aabb_tree.update(i, aabb); // update position of particle h_postype.data[i] = make_scalar4(pos_i.x,pos_i.y,pos_i.z,postype_i.w); if (shape_i.hasOrientation()) { h_orientation.data[i] = quat_to_scalar4(shape_i.orientation); } } else { if (!shape_i.ignoreStatistics()) { // increment reject counter if (move_type_translate) 
counters.translate_reject_count++; else counters.rotate_reject_count++; } } } // end loop over all particles } // end loop over nselect { ArrayHandle<Scalar4> h_postype(this->m_pdata->getPositions(), access_location::host, access_mode::readwrite); ArrayHandle<int3> h_image(this->m_pdata->getImages(), access_location::host, access_mode::readwrite); // wrap particles back into box for (unsigned int i = 0; i < this->m_pdata->getN(); i++) { box.wrap(h_postype.data[i], h_image.data[i]); } } // perform the grid shift #ifdef ENABLE_MPI if (this->m_comm) { ArrayHandle<Scalar4> h_postype(this->m_pdata->getPositions(), access_location::host, access_mode::readwrite); ArrayHandle<int3> h_image(this->m_pdata->getImages(), access_location::host, access_mode::readwrite); // precalculate the grid shift hoomd::detail::Saru rng(timestep, this->m_seed, 0xf4a3210e); Scalar3 shift = make_scalar3(0,0,0); shift.x = rng.s(-this->m_nominal_width/Scalar(2.0),this->m_nominal_width/Scalar(2.0)); shift.y = rng.s(-this->m_nominal_width/Scalar(2.0),this->m_nominal_width/Scalar(2.0)); if (this->m_sysdef->getNDimensions() == 3) { shift.z = rng.s(-this->m_nominal_width/Scalar(2.0),this->m_nominal_width/Scalar(2.0)); } for (unsigned int i = 0; i < this->m_pdata->getN(); i++) { // read in the current position and orientation Scalar4 postype_i = h_postype.data[i]; vec3<Scalar> r_i = vec3<Scalar>(postype_i); // translation from local to global coordinates r_i += vec3<Scalar>(shift); h_postype.data[i] = vec_to_scalar4(r_i, postype_i.w); box.wrap(h_postype.data[i], h_image.data[i]); } this->m_pdata->translateOrigin(shift); } #endif if (this->m_prof) this->m_prof->pop(this->m_exec_conf); // migrate and exchange particles this->communicate(true); // all particle have been moved, the aabb tree is now invalid this->m_aabb_tree_invalid = true; } /* \param rng The random number generator * \param pos_sphere Center of sphere * \param delta diameter of sphere * \param d_min Diameter of smaller sphere excluding 
depletant
 * \param pos Position of depletant (return value)
 * \param orientation Orientation of depletant (return value)
 * \param params_depletant Depletant parameters
 */
template<class Shape>
template<class RNG>
inline void IntegratorMCMMonoImplicit<Shape>::generateDepletant(RNG& rng,
    vec3<Scalar> pos_sphere,
    Scalar delta,
    Scalar d_min,
    vec3<Scalar>& pos,
    quat<Scalar>& orientation,
    const typename Shape::param_type& params_depletant)
    {
    // Samples a depletant position uniformly in the spherical shell between
    // d_min and delta (diameters) around pos_sphere, plus a random orientation
    // if the depletant shape is anisotropic.

    // draw a random vector in the excluded volume sphere of the colloid
    Scalar theta = rng.template s<Scalar>(Scalar(0.0),Scalar(2.0*M_PI));
    Scalar z = rng.template s<Scalar>(Scalar(-1.0),Scalar(1.0));

    // random normalized vector (uniform on the unit sphere via z = cos(theta_polar))
    vec3<Scalar> n(fast::sqrt(Scalar(1.0)-z*z)*fast::cos(theta),fast::sqrt(Scalar(1.0)-z*z)*fast::sin(theta),z);

    // draw random radial coordinate in test sphere
    // sampling r^3 uniformly gives a uniform density in the shell volume
    Scalar r3 = rng.template s<Scalar>(fast::pow(d_min/delta,Scalar(3.0)),Scalar(1.0));
    Scalar r = Scalar(0.5)*delta*fast::pow(r3,Scalar(1.0/3.0));

    // test depletant position
    vec3<Scalar> pos_depletant = pos_sphere+r*n;

    Shape shape_depletant(quat<Scalar>(), params_depletant);
    if (shape_depletant.hasOrientation())
        {
        orientation = generateRandomOrientation(rng);
        }
    pos = pos_depletant;
    }

/* \param rng The random number generator
 * \param pos_sphere Center of sphere
 * \param delta diameter of sphere
 * \param delta_other diameter of other sphere
 * \param pos Position of depletant (return value)
 * \param orientation Orientation of depletant (return value)
 * \param params_depletant Depletant parameters
 * \param pos_sphere_other Center of other sphere
 */
template<class Shape>
template<class RNG>
inline void IntegratorMCMMonoImplicit<Shape>::generateDepletantRestricted(RNG& rng,
    vec3<Scalar> pos_sphere,
    Scalar delta,
    Scalar delta_other,
    vec3<Scalar>& pos,
    quat<Scalar>& orientation,
    const typename Shape::param_type& params_depletant,
    vec3<Scalar> pos_sphere_other)
    {
    // Samples a depletant position restricted so that it can intersect the
    // sphere of diameter delta while avoiding the interior of the other sphere
    // (diameter delta_other), used for the configurational-bias re-insertions.
    vec3<Scalar> r_ij = pos_sphere - pos_sphere_other;
    Scalar d = fast::sqrt(dot(r_ij,r_ij));

    Scalar rmin(0.0);
    Scalar rmax = Scalar(0.5)*delta;

    Scalar ctheta_min(-1.0);
    bool do_rotate = false;
    if (d > Scalar(0.0) && delta_other > Scalar(0.0))
        {
        // draw a random direction in the bounded spherical shell
        // ctheta is the cosine of the half-angle of the spherical cap cut by the
        // intersection circle of the two spheres
        Scalar ctheta = (delta_other*delta_other+Scalar(4.0)*d*d-delta*delta)/(Scalar(4.0)*delta_other*d);
        if (ctheta >= Scalar(-1.0) && ctheta < Scalar(1.0))
            {
            // true intersection, we can restrict angular sampling
            ctheta_min = ctheta;
            }

        // is there an intersection?
        if (Scalar(2.0)*d < delta+delta_other)
            {
            // sample in shell around smaller sphere
            rmin = delta_other/Scalar(2.0);
            rmax = d+delta/Scalar(2.0);
            do_rotate = true;
            }
        }

    // draw random radial coordinate in a spherical shell
    // (uniform in volume: sample r^3 uniformly, then take the cube root)
    Scalar r3 = rng.template s<Scalar>(fast::pow(rmin/rmax,Scalar(3.0)),Scalar(1.0));
    Scalar r = rmax*fast::pow(r3,Scalar(1.0/3.0));

    // random direction in spherical shell
    Scalar z = rng.s(ctheta_min,Scalar(1.0));
    Scalar phi = Scalar(2.0*M_PI)*rng.template s<Scalar>();
    vec3<Scalar> n;
    if (do_rotate)
        {
        // restricted cone sampling: build an orthonormal frame around the
        // inter-sphere axis u, then rotate a perpendicular vector by phi
        vec3<Scalar> u(r_ij/d);

        // normal vector
        vec3<Scalar> v(cross(u,vec3<Scalar>(0,0,1)));
        if (dot(v,v) < EPSILON)
            {
            // u was (anti)parallel to z; pick a different reference axis
            v = cross(u,vec3<Scalar>(0,1,0));
            }
        v *= fast::rsqrt(dot(v,v));

        quat<Scalar> q(quat<Scalar>::fromAxisAngle(u,phi));
        n = z*u+(fast::sqrt(Scalar(1.0)-z*z))*rotate(q,v);
        }
    else
        {
        n = vec3<Scalar>(fast::sqrt(Scalar(1.0)-z*z)*fast::cos(phi),fast::sqrt(Scalar(1.0)-z*z)*fast::sin(phi),z);
        }

    // test depletant position
    pos = r*n;
    if (do_rotate)
        {
        // insert such that it potentially intersects the sphere, but not the other one
        pos += pos_sphere_other;
        }
    else
        {
        // insert in sphere
        pos += pos_sphere;
        }

    Shape shape_depletant(quat<Scalar>(), params_depletant);
    if (shape_depletant.hasOrientation())
        {
        orientation = generateRandomOrientation(rng);
        }
    }

/*!
\param pos_depletant Depletant position
 * \param shape_depletant Depletant shape
 * \param idx Index of updated particle
 * \param h_overlaps Interaction matrix
 * \param typ_i type of updated particle
 * \param h_orientation Orientation array
 * \param pos_new New position of updated particle
 * \param orientation_new New orientation of updated particle
 * \param params_new New shape parameters of updated particle
 * \param counters MCM overlap counters
 *
 * \returns true if the depletant overlaps the moved particle in the checked
 * configuration (selected by \a new_config) but overlaps nothing else —
 * i.e. the trial insertion is "successful" for the configurational-bias count.
 */
template<class Shape>
inline bool IntegratorMCMMonoImplicit<Shape>::insertDepletant(vec3<Scalar>& pos_depletant,
    const Shape& shape_depletant,
    unsigned int idx,
    typename Shape::param_type *params,
    unsigned int *h_overlaps,
    unsigned int typ_i,
    Scalar4 *h_postype,
    Scalar4 *h_orientation,
    vec3<Scalar> pos_new,
    quat<Scalar>& orientation_new,
    const typename Shape::param_type& params_new,
    unsigned int &n_overlap_checks,
    unsigned int &overlap_err_count,
    bool& overlap_shape,
    bool new_config)
    {
    overlap_shape=false;

    detail::AABB aabb_depletant_local = shape_depletant.getAABB(vec3<Scalar>(0,0,0));

    // now check if depletant overlaps with moved particle in the old configuration
    // (when new_config is true the roles of old/new are swapped below)
    Shape shape_i(quat<Scalar>(), params_new);
    if (shape_i.hasOrientation())
        {
        if (!new_config)
            {
            // load old orientation
            Scalar4 orientation_i = h_orientation[idx];
            shape_i.orientation = quat<Scalar>(orientation_i);
            }
        else
            {
            shape_i.orientation = orientation_new;
            }
        }

    vec3<Scalar> pos_i;
    if (!new_config)
        {
        // load old position
        pos_i = vec3<Scalar>(h_postype[idx]);
        }
    else
        {
        pos_i = pos_new;
        }

    // only need to consider the (0,0,0) image
    detail::AABB aabb = aabb_depletant_local;
    aabb.translate(pos_depletant);

    // put particles in coordinate system of depletant
    vec3<Scalar> r_ij = pos_i - pos_depletant;

    n_overlap_checks++;

    // test circumsphere overlap (cheap rejection before the exact test)
    OverlapReal rsq = dot(r_ij,r_ij);
    OverlapReal DaDb = shape_depletant.getCircumsphereDiameter() + shape_i.getCircumsphereDiameter();
    bool circumsphere_overlap = (rsq*OverlapReal(4.0) <= DaDb * DaDb);

    if (h_overlaps[this->m_overlap_idx(typ_i, m_type)]
        && circumsphere_overlap
        && test_overlap(r_ij, shape_depletant, shape_i, overlap_err_count))
        {
        overlap_shape = true;
        }

    // same, but for reverse move: check against the *other* configuration of particle idx
    if (shape_i.hasOrientation())
        {
        if (new_config)
            {
            // load old orientation
            Scalar4 orientation_i = h_orientation[idx];
            shape_i.orientation = quat<Scalar>(orientation_i);
            }
        else
            {
            shape_i.orientation = orientation_new;
            }
        }

    if (new_config)
        {
        // load old position
        pos_i = vec3<Scalar>(h_postype[idx]);
        }
    else
        {
        pos_i = pos_new;
        }

    // only need to consider the (0,0,0) image
    aabb = aabb_depletant_local;
    aabb.translate(pos_depletant);

    // put particles in coordinate system of depletant
    r_ij = pos_i - pos_depletant;

    n_overlap_checks++;

    // test circumsphere overlap
    rsq = dot(r_ij,r_ij);
    DaDb = shape_depletant.getCircumsphereDiameter() + shape_i.getCircumsphereDiameter();
    circumsphere_overlap = (rsq*OverlapReal(4.0) <= DaDb * DaDb);

    // check for overlaps with neighboring particle's positions
    bool overlap=false;
    if (h_overlaps[this->m_overlap_idx(m_type, typ_i)]
        && circumsphere_overlap
        && test_overlap(r_ij, shape_depletant, shape_i, overlap_err_count))
        {
        // if we are already overlapping in the other configuration, this doesn't count as an insertion
        overlap = true;
        }

    if (!overlap && overlap_shape)
        {
        // check the depletant against all other particles via the AABB tree
        // All image boxes (including the primary)
        const unsigned int n_images = this->m_image_list.size();
        for (unsigned int cur_image = 0; cur_image < n_images; cur_image++)
            {
            vec3<Scalar> pos_depletant_image = pos_depletant + this->m_image_list[cur_image];
            detail::AABB aabb = aabb_depletant_local;
            aabb.translate(pos_depletant_image);

            // stackless search
            for (unsigned int cur_node_idx = 0; cur_node_idx < this->m_aabb_tree.getNumNodes(); cur_node_idx++)
                {
                if (detail::overlap(this->m_aabb_tree.getNodeAABB(cur_node_idx), aabb))
                    {
                    if (this->m_aabb_tree.isNodeLeaf(cur_node_idx))
                        {
                        for (unsigned int cur_p = 0; cur_p < this->m_aabb_tree.getNodeNumParticles(cur_node_idx); cur_p++)
                            {
                            // read in its position and orientation
                            unsigned int j = this->m_aabb_tree.getNodeParticle(cur_node_idx, cur_p);

                            // load the position and orientation of the j particle
                            Scalar4 postype_j = h_postype[j];
                            vec3<Scalar> pos_j(postype_j);
                            Scalar4 orientation_j = h_orientation[j];

                            unsigned int type = __scalar_as_int(postype_j.w);
                            Shape shape_j(quat<Scalar>(orientation_j), params[type]);

                            if (j == idx)
                                {
                                // we have already excluded overlap with the moved particle above
                                continue;
                                }

                            // put particles in coordinate system of depletant
                            vec3<Scalar> r_ij = pos_j - pos_depletant_image;

                            n_overlap_checks++;

                            // check circumsphere overlap
                            OverlapReal rsq = dot(r_ij,r_ij);
                            OverlapReal DaDb = shape_depletant.getCircumsphereDiameter() + shape_j.getCircumsphereDiameter();
                            bool circumsphere_overlap = (rsq*OverlapReal(4.0) <= DaDb * DaDb);

                            if (h_overlaps[this->m_overlap_idx(type, m_type)]
                                && circumsphere_overlap
                                && test_overlap(r_ij, shape_depletant, shape_j, overlap_err_count))
                                {
                                overlap = true;
                                break;
                                }
                            }
                        }
                    }
                else
                    {
                    // skip ahead
                    cur_node_idx += this->m_aabb_tree.getNodeSkip(cur_node_idx);
                    }
                if (overlap)
                    break;
                }  // end loop over AABB nodes
            if (overlap)
                break;
            } // end loop over images
        } // end if overlap with shape

    return overlap_shape && !overlap;
    }
/*! \param quantity Name of the log quantity to get
    \param timestep Current time step of the simulation
    \return the requested log quantity.
*/
template<class Shape>
Scalar IntegratorMCMMonoImplicit<Shape>::getLogValue(const std::string& quantity, unsigned int timestep)
    {
    if (quantity == "mcm_fugacity")
        {
        return (Scalar) m_n_R;
        }
    if (quantity == "mcm_ntrial")
        {
        return (Scalar) m_n_trial;
        }

    // counters relative to the last executed step (mode 2)
    mcm_counters_t counters = IntegratorMCM::getCounters(2);
    mcm_implicit_counters_t implicit_counters = getImplicitCounters(2);

    if (quantity == "mcm_insert_count")
        {
        // return number of depletant insertions per colloid
        if (counters.getNMoves() > 0)
            return (Scalar)implicit_counters.insert_count/(Scalar)counters.getNMoves();
        else
            return Scalar(0.0);
        }
    if (quantity == "mcm_reinsert_count")
        {
        // return number of overlapping depletants reinserted per colloid
        if (counters.getNMoves() > 0)
            return (Scalar)implicit_counters.reinsert_count/(Scalar)counters.getNMoves();
        else
            return Scalar(0.0);
        }
    if (quantity == "mcm_free_volume_fraction")
        {
        // return fraction of free volume in depletant insertion sphere
        return (Scalar) implicit_counters.getFreeVolumeFraction();
        }
    if (quantity == "mcm_overlap_fraction")
        {
        // return fraction of overlapping depletants after trial move
        return (Scalar) implicit_counters.getOverlapFraction();
        }
    if (quantity == "mcm_configurational_bias_ratio")
        {
        // return the configurational-bias acceptance ratio
        return (Scalar) implicit_counters.getConfigurationalBiasRatio();
        }

    //nothing found -> pass on to base class
    return IntegratorMCMMono<Shape>::getLogValue(quantity, timestep);
    }

/*! \param mode 0 -> Absolute count, 1 -> relative to the start of the run, 2 -> relative to the last executed step
    \return The current state of the acceptance counters

    IntegratorMCMMonoImplicit maintains a count of the number of accepted and rejected moves since instantiation. getCounters()
    provides the current value.
The parameter *mode* controls whether the returned counts are absolute, relative to the start of the run, or relative to the
    start of the last executed step.
*/
template<class Shape>
mcm_implicit_counters_t IntegratorMCMMonoImplicit<Shape>::getImplicitCounters(unsigned int mode)
    {
    ArrayHandle<mcm_implicit_counters_t> h_counters(m_implicit_count, access_location::host, access_mode::read);
    mcm_implicit_counters_t result;

    if (mode == 0)
        result = h_counters.data[0];
    else if (mode == 1)
        result = h_counters.data[0] - m_implicit_count_run_start;
    else
        result = h_counters.data[0] - m_implicit_count_step_start;

#ifdef ENABLE_MPI
    if (this->m_comm)
        {
        // MPI Reduction to total result values on all ranks
        MPI_Allreduce(MPI_IN_PLACE, &result.insert_count, 1, MPI_LONG_LONG_INT, MPI_SUM, this->m_exec_conf->getMPICommunicator());
        MPI_Allreduce(MPI_IN_PLACE, &result.free_volume_count, 1, MPI_LONG_LONG_INT, MPI_SUM, this->m_exec_conf->getMPICommunicator());
        MPI_Allreduce(MPI_IN_PLACE, &result.overlap_count, 1, MPI_LONG_LONG_INT, MPI_SUM, this->m_exec_conf->getMPICommunicator());
        MPI_Allreduce(MPI_IN_PLACE, &result.reinsert_count, 1, MPI_LONG_LONG_INT, MPI_SUM, this->m_exec_conf->getMPICommunicator());
        }
#endif

    return result;
    }

/*! NPT simulations are not supported with implicit depletants (The Nmu_ptPT ensemble is instable)
    \returns false if resize results in overlaps
*/
template<class Shape>
bool IntegratorMCMMonoImplicit<Shape>::attemptBoxResize(unsigned int timestep, const BoxDim& new_box)
    {
    // Always rejects by throwing: box moves are incompatible with this integrator.
    this->m_exec_conf->msg->error() << "Nmu_pPT simulations are unsupported." << std::endl;
    throw std::runtime_error("Error during implicit depletant integration\n");
    }

//! Export this mcm integrator to python
/*!
\param name Name of the class in the exported python module
    \tparam Shape An instantiation of IntegratorMCMMono<Shape> will be exported
*/
template < class Shape > void export_IntegratorMCMMonoImplicit(pybind11::module& m, const std::string& name)
    {
    // derive from the non-implicit integrator's python binding
    pybind11::class_<IntegratorMCMMonoImplicit<Shape>, std::shared_ptr< IntegratorMCMMonoImplicit<Shape> > >(m, name.c_str(), pybind11::base< IntegratorMCMMono<Shape> >())
        .def(pybind11::init< std::shared_ptr<SystemDefinition>, unsigned int >())
        .def("setDepletantDensity", &IntegratorMCMMonoImplicit<Shape>::setDepletantDensity)
        .def("setDepletantType", &IntegratorMCMMonoImplicit<Shape>::setDepletantType)
        .def("setNTrial", &IntegratorMCMMonoImplicit<Shape>::setNTrial)
        .def("getNTrial", &IntegratorMCMMonoImplicit<Shape>::getNTrial)
        .def("getImplicitCounters", &IntegratorMCMMonoImplicit<Shape>::getImplicitCounters)
        ;
    }

//! Export the counters for depletants
inline void export_mcm_implicit_counters(pybind11::module& m)
    {
    pybind11::class_< mcm_implicit_counters_t >(m, "mcm_implicit_counters_t")
        .def_readwrite("insert_count", &mcm_implicit_counters_t::insert_count)
        .def_readwrite("reinsert_count", &mcm_implicit_counters_t::reinsert_count)
        .def_readwrite("free_volume_count", &mcm_implicit_counters_t::free_volume_count)
        .def_readwrite("overlap_count", &mcm_implicit_counters_t::overlap_count)
        .def("getFreeVolumeFraction", &mcm_implicit_counters_t::getFreeVolumeFraction)
        .def("getOverlapFraction", &mcm_implicit_counters_t::getOverlapFraction)
        .def("getConfigurationalBiasRatio", &mcm_implicit_counters_t::getConfigurationalBiasRatio)
        ;
    }

} // end namespace mcm

#endif // __MCM_MONO_IMPLICIT__H__
fftbench.c
// // fftbench.c // // A simple FFT implmentation for use as a micro-controller benchmark. This is an in-place // Radix-2 Decimation In Time FFT using fixed point arithmetic. // // When reimplementing this benchmark in other languages please honour the intention of // this benchmark by following the algorithm as closely as possible. This version is based off // of bech_fft.spin which is to be regarded as the "mother" of all versions of this benchmark // in other languages. // // This FFT was developed from the description by Douglas L. Jones at // http://cnx.org/content/m12016/latest/. // It is written as a direct implementation of the discussion and diagrams on that page // with an emphasis on clarity and ease of understanding rather than speed. // // // This file is released under the terms of the MIT license. See below. // // Credits: // // A big thank you to Dave Hein for clearing up some issues during a great FFT debate on // the Parallax Inc Propller discussion forum: // http://forums.parallax.com/showthread.php?127306-Fourier-for-dummies-under-construction // // History: // // 2011-02-27 v1.0 Initial version. // // 2012-10-04 v1.1 Added support for parallel processing using OpenMP // A crude attempt at parallelization using up to 4 cores max. // // 2012-12-05 v1.2 Changed to use "parallel for" OMP construct. // Configured for 4 cores max. // #include <stdio.h> #include <sys/time.h> #include "firmware.h" #ifdef _OPENMP // Only include omp if it is available #include <omp.h> #else // Otherwise redefine some omp functions to remove compiler errors #define omp_get_max_threads() 1 #define omp_get_thread_num() 1 #endif #define int32_t int #define int16_t short int // Specify size of FFT buffer here with length and log base 2 of the length. // N.B. Changing this will require changing the "twiddle factor" tables. 
// and may also require changing the fixed point format (if going bigger)
#define FFT_SIZE        1024
#define LOG2_FFT_SIZE   10

// cos and sin parts of the signal to be analysed
// Result is written back to here.
// Just write input samples to bx and zero all by.
static int32_t bx[FFT_SIZE];
static int32_t by[FFT_SIZE];

// Set if array bounds exceeded
int rangeError = 0;

static void fillInput(void);
static void decimate(void);
void butterflies(int32_t* bx, int32_t* by, int32_t firstLevel, int32_t lastLevel, int32_t slices, int32_t slen);
static void printSpectrum(void);

// Print the OpenMP version in use, or a notice if OpenMP is unavailable.
// NOTE(review): uses printf here but print_str elsewhere — confirm both are
// available on the target firmware.
static void print_omp_version(void)
{
#ifdef _OPENMP
    printf("OpenMP version = ");
    switch (_OPENMP)
    {
        case 200805: printf("3.0"); break;
        case 200505: printf("2.5"); break;
        case 200203: printf("2.0"); break;
        default:     printf("Unknown. _OPENMP = %d", _OPENMP); break;
    }
    printf("\n");
#else
    print_str("OpenMP not available on this system\r\n");
#endif
}

// Run the whole benchmark: fill input, bit-reverse, butterflies (optionally
// parallelized over slices with OpenMP), then print the spectrum and timing.
void fft_bench(void)
{
    long long startTime, endTime;
#ifdef _OPENMP
    int tid;   // only referenced by the omp private() clause below
#endif
    int s, slen;
    int firstLevel;
    int lastLevel;
    int slice;
    int slices;

    print_str ("fft_bench v1.2\r\n");
    print_omp_version();

    // Input some data
    fillInput();

    // HACK, when playing on a single CPU ensure we have some threads like 4 core
    // omp_set_num_threads(2);

    // Start benchmark timer
    startTime = time_us();

    // Radix-2 Decimation In Time, the bit-reversal step.
    decimate();

    // Our FFT array will be split into slices; each slice can be handled by its own thread.
    // Select one of the slice counts below (lastLevel is the last butterfly level that is
    // independent within a slice; remaining levels run with fewer slices each pass).
    // slices = 1; // lastLevel = LOG2_FFT_SIZE - 1;
    slices = 2; lastLevel = LOG2_FFT_SIZE - 2;
    //slices = 4; //lastLevel = LOG2_FFT_SIZE - 3;
    // slices = 8; // lastLevel = LOG2_FFT_SIZE - 4;
    // slices = 16; // lastLevel = LOG2_FFT_SIZE - 5;

    firstLevel = 0;
    for ( ; slices >= 1; slices = slices / 2)
    {
#pragma omp parallel for default (none) \
        shared (bx, by) \
        private (slice, s, slen, tid) \
        firstprivate(slices, firstLevel, lastLevel)
        for (slice = 0; slice < slices; slice++)
        {
            s = FFT_SIZE * slice / slices;
            slen = FFT_SIZE / slices;
            butterflies(&bx[s], &by[s], firstLevel, lastLevel, slices, slen);
        }
        lastLevel = lastLevel + 1;
        firstLevel = lastLevel;
    }

    // Did we have an array bounds violation?
    if (rangeError)
        print_str ("Error: Array bounds violation\n");

    // Stop benchmark timer
    endTime = time_us();

    // Print resulting spectrum
    printSpectrum();

    print_str("1024 point bit-reversal and butterfly run time = ");
    print_dec (endTime - startTime);
    print_str("us\r\n");
}

// Integer square root (bit-by-bit method, no multiply or divide).
static int sqrti(int i)
{
    int s = 0;
    int t = 1 << 30;
    while (t)
    {
        s |= t;
        if (s <= i)
        {
            i -= s;
            s += t;
        }
        else
            s -= t;
        s >>= 1;
        t >>= 2;
    }
    return(s);
}

// Print the non-zero magnitudes of the first half of the spectrum.
static void printSpectrum()
{
    int32_t f, real, imag, magnitude;

    // Spectrum is available in first half of the buffers after FFT.
    print_str("Freq. Magnitude\r\n");
    for (f = 0; f <= FFT_SIZE / 2; f++)
    {
        // Frequency magnitude is square root of cos part squared plus sin part squared
        real = bx[f] / FFT_SIZE;
        imag = by[f] / FFT_SIZE;
        magnitude = sqrti ((real * real) + (imag * imag));
        if (magnitude > 0)
        {
            print_hex (f, 8);
            print_str(" ");
            print_hex (magnitude, 8);
            print_str("\r\n");
        }
    }
}

// For testing define 16 samples of an input wave form here.
static int32_t input[] = {4096, 3784, 2896, 1567, 0, -1567, -2896, -3784, -4096, -3784, -2896, -1567, 0, 1567, 2896, 3784};

// Fill buffer bx with samples of an input signal and clear by.
static void fillInput()
{
    // Builds a test signal from two frequencies of the 16-sample input wave,
    // the Nyquist frequency, and a DC offset; clears the imaginary buffer.
    int32_t k;
    for (k = 0; k <= FFT_SIZE - 1; k++)
    {
        // Two frequencies of the waveform defined in input
        bx[k] = (input[(3*k) % 16] / 4);
        bx[k] += (input[(5*k) % 16] / 4);

        // The highest frequency
        if (k & 1)
            bx[k] += (4096 / 8);
        else
            bx[k] += (-4096 / 8);

        // A DC level
        bx[k] += (4096 / 8);

        // Clear Y array.
        by[k] = 0;
    }
}

// Reverse length low order bits of integer
static unsigned int bitReverse(unsigned int x, unsigned int length)
{
    // Classic O(log n) bit-reversal: swap adjacent bits, then pairs, nibbles,
    // bytes, halves, then shift down to keep only the low `length` bits reversed.
    x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
    x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
    x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
    x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));
    x = (x >> 16) | (x << 16);
    return (x >> (32 - length));
}

// Radix-2 decimation in time.
// Moves every sample of bx and by to a postion given by
// reversing the bits of its original array index.
static void decimate()
{
    int32_t i, revi, tx1, ty1;
    for (i = 0; i <= FFT_SIZE - 1; i++)
    {
        revi = bitReverse (i, LOG2_FFT_SIZE);
        // swap each pair only once (when i precedes its bit-reversed partner)
        if (i < revi)
        {
            tx1 = bx[i];
            ty1 = by[i];
            bx[i] = bx[revi];
            by[i] = by[revi];
            bx[revi] = tx1;
            by[revi] = ty1;
        }
    }
}

// Twiddle-factor tables; defined below pointing into the cos[] table.
static int32_t *wx;
static int32_t *wy;

// Apply FFT butterflies to N complex samples in buffers bx and by, in time decimated order!
// Resulting FFT is produced in bx and by in the correct order.
void butterflies(int32_t* bx, int32_t* by, int32_t firstLevel, int32_t lastLevel, int32_t slices, int32_t slen)
{
    int32_t flightSize = 1 << firstLevel;
    int32_t wDelta = FFT_SIZE / (2 * (1 << firstLevel));
    int32_t noFlights = wDelta / slices;

    // Loop though the decimation levels
    // lastLevel is logN - 1
    for (int32_t level = firstLevel; level <= lastLevel; level++)
    {
        int32_t flightIndex = 0;
        // Loop through each flight on a level.
        for (int32_t flight = 0; flight < noFlights; flight++)
        {
            int32_t wIndex = 0;
            // Loop through butterflies within a flight.
for (int32_t butterfly = 0; butterfly < flightSize; butterfly++) { int32_t b0 = flightIndex + butterfly; int32_t b1 = b0 + flightSize; // Check that we are within our array slice if ((b0 < 0) || (b0 >= slen)) rangeError = 1; if ((b1 < 0) || (b1 >= slen)) rangeError = 1; // At last...the butterfly. // Get X[b1] int32_t a = bx[b1]; int32_t b = by[b1]; // Get W[wIndex] int32_t c = wx[wIndex]; int32_t d = wy[wIndex]; // Somewhat optimized complex multiply int32_t k1 = (a * (c + d)) >> 12; // T = X[b1] * W[wIndex] int32_t k2 = (d * (a + b)) >> 12; int32_t k3 = (c * (b - a)) >> 12; int32_t tx = k1 - k2; int32_t ty = k1 + k3; k1 = bx[b0]; k2 = by[b0]; // X[b1] = X[b0] * T bx[b1] = k1 - tx; by[b1] = k2 - ty; // X[b0] = X[b0] * T bx[b0] = k1 + tx; by[b0] = k2 + ty; wIndex += wDelta; } flightIndex += flightSize << 1; } flightSize <<= 1; noFlights >>= 1; wDelta >>= 1; } } // Cosine from 0 to 3π/2 (0 to 270 degrees) static int32_t cos[768] = { 4095, 4094, 4094, 4094, 4093, 4093, 4092, 4091, 4090, 4088, 4087, 4085, 4083, 4081, 4079, 4077, 4075, 4072, 4070, 4067, 4064, 4061, 4057, 4054, 4050, 4046, 4042, 4038, 4034, 4030, 4025, 4021, 4016, 4011, 4006, 4000, 3995, 3989, 3984, 3978, 3972, 3966, 3959, 3953, 3946, 3939, 3932, 3925, 3918, 3911, 3903, 3896, 3888, 3880, 3872, 3864, 3855, 3847, 3838, 3829, 3820, 3811, 3802, 3792, 3783, 3773, 3763, 3753, 3743, 3733, 3723, 3712, 3701, 3691, 3680, 3668, 3657, 3646, 3634, 3623, 3611, 3599, 3587, 3575, 3563, 3550, 3537, 3525, 3512, 3499, 3486, 3473, 3459, 3446, 3432, 3418, 3404, 3390, 3376, 3362, 3348, 3333, 3318, 3304, 3289, 3274, 3258, 3243, 3228, 3212, 3197, 3181, 3165, 3149, 3133, 3117, 3100, 3084, 3067, 3051, 3034, 3017, 3000, 2983, 2965, 2948, 2930, 2913, 2895, 2877, 2859, 2841, 2823, 2805, 2787, 2768, 2750, 2731, 2712, 2693, 2674, 2655, 2636, 2617, 2597, 2578, 2558, 2539, 2519, 2499, 2479, 2459, 2439, 2419, 2398, 2378, 2357, 2337, 2316, 2295, 2275, 2254, 2233, 2211, 2190, 2169, 2148, 2126, 2105, 2083, 2061, 2040, 2018, 1996, 1974, 
1952, 1930, 1908, 1885, 1863, 1841, 1818, 1796, 1773, 1750, 1728, 1705, 1682, 1659, 1636, 1613, 1590, 1567, 1543, 1520, 1497, 1473, 1450, 1426, 1403, 1379, 1355, 1332, 1308, 1284, 1260, 1236, 1212, 1188, 1164, 1140, 1116, 1092, 1067, 1043, 1019, 994, 970, 946, 921, 897, 872, 848, 823, 798, 774, 749, 724, 700, 675, 650, 625, 600, 575, 551, 526, 501, 476, 451, 426, 401, 376, 351, 326, 301, 276, 251, 226, 200, 175, 150, 125, 100, 75, 50, 25, 0, -25, -50, -75, -100, -125, -150, -175, -200, -226, -251, -276, -301, -326, -351, -376, -401, -426, -451, -476, -501, -526, -551, -576, -600, -625, -650, -675, -700, -724, -749, -774, -798, -823, -848, -872, -897, -921, -946, -970, -995, -1019, -1043, -1067, -1092, -1116, -1140, -1164, -1188, -1212, -1236, -1260, -1284, -1308, -1332, -1355, -1379, -1403, -1426, -1450, -1473, -1497, -1520, -1543, -1567, -1590, -1613, -1636, -1659, -1682, -1705, -1728, -1750, -1773, -1796, -1818, -1841, -1863, -1885, -1908, -1930, -1952, -1974, -1996, -2018, -2040, -2062, -2083, -2105, -2126, -2148, -2169, -2190, -2212, -2233, -2254, -2275, -2295, -2316, -2337, -2357, -2378, -2398, -2419, -2439, -2459, -2479, -2499, -2519, -2539, -2558, -2578, -2597, -2617, -2636, -2655, -2674, -2693, -2712, -2731, -2750, -2768, -2787, -2805, -2823, -2841, -2859, -2877, -2895, -2913, -2930, -2948, -2965, -2983, -3000, -3017, -3034, -3051, -3067, -3084, -3100, -3117, -3133, -3149, -3165, -3181, -3197, -3212, -3228, -3243, -3258, -3274, -3289, -3304, -3318, -3333, -3348, -3362, -3376, -3390, -3404, -3418, -3432, -3446, -3459, -3473, -3486, -3499, -3512, -3525, -3537, -3550, -3563, -3575, -3587, -3599, -3611, -3623, -3634, -3646, -3657, -3669, -3680, -3691, -3701, -3712, -3723, -3733, -3743, -3753, -3763, -3773, -3783, -3792, -3802, -3811, -3820, -3829, -3838, -3847, -3855, -3864, -3872, -3880, -3888, -3896, -3903, -3911, -3918, -3925, -3932, -3939, -3946, -3953, -3959, -3966, -3972, -3978, -3984, -3989, -3995, -4000, -4006, -4011, -4016, -4021, -4025, -4030, -4034, 
-4038, -4043, -4046, -4050, -4054, -4057, -4061, -4064, -4067, -4070, -4072, -4075, -4077, -4079, -4081, -4083, -4085, -4087, -4088, -4090, -4091, -4092, -4093, -4093, -4094, -4094, -4094, -4094, -4094, -4094, -4094, -4093, -4093, -4092, -4091, -4090, -4088, -4087, -4085, -4083, -4081, -4079, -4077, -4075, -4072, -4070, -4067, -4064, -4061, -4057, -4054, -4050, -4046, -4042, -4038, -4034, -4030, -4025, -4021, -4016, -4011, -4006, -4000, -3995, -3989, -3984, -3978, -3972, -3966, -3959, -3953, -3946, -3939, -3932, -3925, -3918, -3911, -3903, -3896, -3888, -3880, -3872, -3863, -3855, -3847, -3838, -3829, -3820, -3811, -3802, -3792, -3783, -3773, -3763, -3753, -3743, -3733, -3723, -3712, -3701, -3691, -3680, -3668, -3657, -3646, -3634, -3623, -3611, -3599, -3587, -3575, -3562, -3550, -3537, -3525, -3512, -3499, -3486, -3473, -3459, -3446, -3432, -3418, -3404, -3390, -3376, -3362, -3347, -3333, -3318, -3304, -3289, -3274, -3258, -3243, -3228, -3212, -3197, -3181, -3165, -3149, -3133, -3117, -3100, -3084, -3067, -3050, -3034, -3017, -3000, -2983, -2965, -2948, -2930, -2913, -2895, -2877, -2859, -2841, -2823, -2805, -2787, -2768, -2749, -2731, -2712, -2693, -2674, -2655, -2636, -2617, -2597, -2578, -2558, -2539, -2519, -2499, -2479, -2459, -2439, -2419, -2398, -2378, -2357, -2337, -2316, -2295, -2275, -2254, -2233, -2211, -2190, -2169, -2148, -2126, -2105, -2083, -2061, -2040, -2018, -1996, -1974, -1952, -1930, -1908, -1885, -1863, -1841, -1818, -1796, -1773, -1750, -1728, -1705, -1682, -1659, -1636, -1613, -1590, -1567, -1543, -1520, -1497, -1473, -1450, -1426, -1403, -1379, -1355, -1332, -1308, -1284, -1260, -1236, -1212, -1188, -1164, -1140, -1116, -1092, -1067, -1043, -1019, -994, -970, -946, -921, -897, -872, -848, -823, -798, -774, -749, -724, -700, -675, -650, -625, -600, -575, -551, -526, -501, -476, -451, -426, -401, -376, -351, -326, -301, -276, -251, -225, -200, -175, -150, -125, -100, -75, -50, -25 }; // Half cycle of cos static int32_t *wx = &cos[0]; // Half 
cycle of minus sine static int32_t *wy = &cos[256]; // This file is distributed under the terms of the The MIT License as follows: // // Copyright (c) 2012 Michael Rychlik // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE.
convolution_1x1_pack8to1_int8.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// 1x1 stride-1 int8 convolution, pack8 input -> pack1 output.
// A 1x1 kernel needs no real im2col: the spatial plane is simply viewed as a
// single row of w*h columns, then the whole convolution is one sgemm.
static void conv1x1s1_sgemm_pack8to1_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    const int size = w * h;

    // Reshape-only "im2col": same data, flattened to (size x 1).
    Mat bottom_im2col = bottom_blob;
    bottom_im2col.w = size;
    bottom_im2col.h = 1;

    im2col_sgemm_pack8to1_int8_neon(bottom_im2col, top_blob, kernel, opt);
}

// 1x1 stride-2 int8 convolution, pack8 input -> pack1 output.
// Strategy: gather every second pixel of every second row into a dense
// (outw x outh) buffer, then reuse the stride-1 sgemm path above.
static void conv1x1s2_pack8to1_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // Bytes to skip at the end of each output row, in int8 units:
    // (w - 2*outw) leftover pixels of the current input row plus one whole
    // input row (vertical stride 2), each pixel being 8 packed int8 values.
    const int tailstep = (w - 2 * outw + w) * 8;

    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < channels; p++)
    {
        const signed char* r0 = bottom_blob.channel(p);
        signed char* outptr = bottom_blob_shrinked.channel(p);

        for (int i = 0; i < outh; i++)
        {
            int j = 0;
            // 4 output pixels per iteration; input pixels are 16 bytes apart
            // (2 pixels * 8 int8 lanes) because of the horizontal stride of 2.
            for (; j + 3 < outw; j += 4)
            {
                int8x8_t _v0 = vld1_s8(r0);
                int8x8_t _v1 = vld1_s8(r0 + 16);
                int8x8_t _v2 = vld1_s8(r0 + 32);
                int8x8_t _v3 = vld1_s8(r0 + 48);
                vst1_s8(outptr, _v0);
                vst1_s8(outptr + 8, _v1);
                vst1_s8(outptr + 16, _v2);
                vst1_s8(outptr + 24, _v3);

                r0 += 64;
                outptr += 32;
            }
            // 2 output pixels per iteration.
            for (; j + 1 < outw; j += 2)
            {
                int8x8_t _v0 = vld1_s8(r0);
                int8x8_t _v1 = vld1_s8(r0 + 16);
                vst1_s8(outptr, _v0);
                vst1_s8(outptr + 8, _v1);

                r0 += 32;
                outptr += 16;
            }
            // Remaining single pixel.
            for (; j < outw; j++)
            {
                int8x8_t _v = vld1_s8(r0);
                vst1_s8(outptr, _v);

                r0 += 16;
                outptr += 8;
            }

            // Jump to the start of the next sampled input row.
            r0 += tailstep;
        }
    }

    conv1x1s1_sgemm_pack8to1_int8_neon(bottom_blob_shrinked, top_blob, kernel, opt);
}
44344d69fda836b6e9b491cf03824e78c7d00a5c.c
#define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "omp.h"

/* Generic data-carrier for a multi-dimensional array (auto-generated layout;
 * only `data` and `size` are used below). */
struct dataobj
{
  void *restrict data;
  int * size;
  int * npsize;
  int * dsize;
  int * hsize;
  int * hofs;
  int * oofs;
} ;

/* Per-section wall-clock accumulators. */
struct profiler
{
  double section0;
} ;

/* Initialize the absorbing-boundary damping field `damp_vec` on an offload
 * device. Looks auto-generated (Devito-style codegen); it adds a damping
 * profile into the six boundary slabs of the 3-D grid: abc_*_ltkn/rtkn give
 * the left/right layer thicknesses per axis, x_m..x_M etc. the interior
 * extents, h_x/h_y/h_z the grid spacings. The "+ 1" in every index is
 * presumably a one-cell halo offset — TODO confirm against the caller.
 * Elapsed time for the compute section is accumulated into timers->section0.
 * Always returns 0. */
int initdamp(struct dataobj *restrict damp_vec, const float h_x, const float h_y, const float h_z, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int abc_x_l_ltkn, const int abc_x_r_rtkn, const int abc_y_l_ltkn, const int abc_y_r_rtkn, const int abc_z_l_ltkn, const int abc_z_r_rtkn, struct profiler * timers)
{
  /* View the flat buffer as a 3-D VLA-typed array for natural indexing. */
  float (*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[damp_vec->size[1]][damp_vec->size[2]]) damp_vec->data;

  /* Copy the whole damp array to the target device up front. */
  #pragma omp target enter data map(to: damp[0:damp_vec->size[0]][0:damp_vec->size[1]][0:damp_vec->size[2]])

  struct timeval start_section0, end_section0;
  gettimeofday(&start_section0, NULL);
  /* Begin section0 */

  /* Left x slab: damping profile as a function of distance into the layer. */
  #pragma omp target teams distribute parallel for collapse(3)
  for (int abc_x_l = x_m; abc_x_l <= abc_x_l_ltkn + x_m - 1; abc_x_l += 1)
  {
    for (int y = y_m; y <= y_M; y += 1)
    {
      for (int z = z_m; z <= z_M; z += 1)
      {
        damp[abc_x_l + 1][y + 1][z + 1] += (-4.12276274369678e-2F*sin(6.28318530717959F*fabs(2.5e-2F*x_m - 2.5e-2F*abc_x_l + 1.025F)) + 2.5904082296183e-1F*fabs(2.5e-2F*x_m - 2.5e-2F*abc_x_l + 1.025F))/h_x;
      }
    }
  }

  /* Right x slab: same profile mirrored from the x_M edge. */
  #pragma omp target teams distribute parallel for collapse(3)
  for (int abc_x_r = -abc_x_r_rtkn + x_M + 1; abc_x_r <= x_M; abc_x_r += 1)
  {
    for (int y = y_m; y <= y_M; y += 1)
    {
      for (int z = z_m; z <= z_M; z += 1)
      {
        damp[abc_x_r + 1][y + 1][z + 1] += (-4.12276274369678e-2F*sin(6.28318530717959F*fabs(-2.5e-2F*x_M + 2.5e-2F*abc_x_r + 1.025F)) + 2.5904082296183e-1F*fabs(-2.5e-2F*x_M + 2.5e-2F*abc_x_r + 1.025F))/h_x;
      }
    }
  }

  /* y and z slabs, fused under one parallel x loop (collapse(1): only the x
   * loop is distributed; inner loops run sequentially per team/thread). */
  #pragma omp target teams distribute parallel for collapse(1)
  for (int x = x_m; x <= x_M; x += 1)
  {
    /* Left y slab. */
    for (int abc_y_l = y_m; abc_y_l <= abc_y_l_ltkn + y_m - 1; abc_y_l += 1)
    {
      for (int z = z_m; z <= z_M; z += 1)
      {
        damp[x + 1][abc_y_l + 1][z + 1] += (-4.12276274369678e-2F*sin(6.28318530717959F*fabs(2.5e-2F*y_m - 2.5e-2F*abc_y_l + 1.025F)) + 2.5904082296183e-1F*fabs(2.5e-2F*y_m - 2.5e-2F*abc_y_l + 1.025F))/h_y;
      }
    }
    /* Right y slab. */
    for (int abc_y_r = -abc_y_r_rtkn + y_M + 1; abc_y_r <= y_M; abc_y_r += 1)
    {
      for (int z = z_m; z <= z_M; z += 1)
      {
        damp[x + 1][abc_y_r + 1][z + 1] += (-4.12276274369678e-2F*sin(6.28318530717959F*fabs(-2.5e-2F*y_M + 2.5e-2F*abc_y_r + 1.025F)) + 2.5904082296183e-1F*fabs(-2.5e-2F*y_M + 2.5e-2F*abc_y_r + 1.025F))/h_y;
      }
    }
    /* Left and right z slabs. */
    for (int y = y_m; y <= y_M; y += 1)
    {
      for (int abc_z_l = z_m; abc_z_l <= abc_z_l_ltkn + z_m - 1; abc_z_l += 1)
      {
        damp[x + 1][y + 1][abc_z_l + 1] += (-4.12276274369678e-2F*sin(6.28318530717959F*fabs(2.5e-2F*z_m - 2.5e-2F*abc_z_l + 1.025F)) + 2.5904082296183e-1F*fabs(2.5e-2F*z_m - 2.5e-2F*abc_z_l + 1.025F))/h_z;
      }
      for (int abc_z_r = -abc_z_r_rtkn + z_M + 1; abc_z_r <= z_M; abc_z_r += 1)
      {
        damp[x + 1][y + 1][abc_z_r + 1] += (-4.12276274369678e-2F*sin(6.28318530717959F*fabs(-2.5e-2F*z_M + 2.5e-2F*abc_z_r + 1.025F)) + 2.5904082296183e-1F*fabs(-2.5e-2F*z_M + 2.5e-2F*abc_z_r + 1.025F))/h_z;
      }
    }
  }
  /* End section0 */
  gettimeofday(&end_section0, NULL);
  timers->section0 += (double)(end_section0.tv_sec-start_section0.tv_sec)+(double)(end_section0.tv_usec-start_section0.tv_usec)/1000000;

  /* Bring the results back to the host, then release the device copy. */
  #pragma omp target update from(damp[0:damp_vec->size[0]][0:damp_vec->size[1]][0:damp_vec->size[2]])
  #pragma omp target exit data map(release: damp[0:damp_vec->size[0]][0:damp_vec->size[1]][0:damp_vec->size[2]])

  return 0;
}
MergeShuffle.h
#pragma once #include "DefaultRandomGenerator.h" #include "shuffle/Shuffle.h" #include <algorithm> #include <thread> #include <vector> template <class ContainerType = std::vector<uint64_t>, class RandomGenerator = DefaultRandomGenerator> class MergeShuffle : public Shuffle<ContainerType, RandomGenerator> { private: static constexpr unsigned long cutoff = 0x10000; std::vector<DefaultRandomGenerator> generators; struct Flipper { Flipper( RandomGenerator& generator ) : g( generator ) { } RandomGenerator& g; uint64_t current = 0; uint64_t index = 0; bool operator()() { if( index == 0 ) current = g(); bool res = ( current >> index ) & 1; index = ( index + 1 ) % 64; return res; } }; static inline unsigned long randomInt( Flipper flip, unsigned long n ) { unsigned long v = 1; unsigned long d = 0; while( true ) { d += d + flip(); v += v; if( v >= n ) { if( d < n ) return d; v -= n; d -= n; } } } template <class T> void merge( T* start, uint64_t mid_idx, uint64_t end_idx, RandomGenerator& g ) { T* const original_start = start; T* mid = start + mid_idx; T* end = start + end_idx; Flipper flip( g ); while( true ) { if( flip() ) { if( start == mid ) break; } else { if( mid == end ) break; std::swap( *start, *mid ); mid++; } start++; } while( start != end ) { const uint64_t num_processed = start - original_start; const uint64_t index = randomInt( flip, num_processed ); std::swap( *( original_start + index ), *start ); start++; } } template <class T> void mergeShuffle( T* t, uint64_t n, RandomGenerator& g ) { // Calculate the number of divisions to reach the cutoff unsigned int c = 0; while( ( n >> c ) > cutoff ) c++; unsigned int q = 1 << c; unsigned long nn = n; if( generators.capacity() < q ) generators.reserve( q ); while( generators.size() < q ) generators.emplace_back( g() ); // Launch thread for local fisher yates #pragma omp parallel for for( unsigned int i = 0; i < q; i++ ) { unsigned long j = nn * i >> c; unsigned long k = std::min( nn * ( i + 1 ) >> c, nn ); assert( j 
< nn ); assert( k <= nn ); std::shuffle( t + j, t + k, this->generators[i] ); } for( unsigned int p = 1; p < q; p += p ) { #pragma omp parallel for for( unsigned int i = 0; i < q; i += 2 * p ) { unsigned long j = nn * i >> c; unsigned long k = nn * ( i + p ) >> c; unsigned long l = std::min( nn * ( i + 2 * p ) >> c, nn ); assert( j < nn ); assert( k < nn ); assert( l <= nn ); merge( t + j, k - j, l - j, this->generators[i] ); } } } public: void shuffle( const ContainerType& in_container, ContainerType& out_container, uint64_t seed, uint64_t num ) override { if( &in_container != &out_container ) { // Copy if we are not doing an inplace operation std::copy( in_container.begin(), in_container.begin() + num, out_container.begin() ); } RandomGenerator g( seed ); mergeShuffle( out_container.data(), num, g ); } bool isDeterministic() const override { return false; } };
main.c
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================
//	DEFINE / INCLUDE
//===============================================================================================================================================================================================================
//===============================================================================================================================================================================================================

#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <time.h>

#include "AVI/avilib.h"
#include "AVI/avimod.h"

#include <omp.h>

//#include "define.c"
#include "kernel.c"

//===============================================================================================================================================================================================================200
//	WRITE DATA FUNCTION
//===============================================================================================================================================================================================================200

// Dump tracking results to a text file.
// The point-location arrays are laid out frame-major per point:
// input_a[j + i*frameNo] is point i at frame j, so each array must hold at
// least <points>*frameNo entries. Writes a small header followed by, for each
// processed frame, the endo row/col and epi row/col coordinate lists.
// On fopen failure it reports and returns without writing.
// Fixes vs. the previous revision: removed the spurious extra `j` argument
// passed to the fixed format strings "\n--endo--\n" and "\n--epi--\n"
// (evaluated and ignored by fprintf), and the unused local `char c`.
void write_data(	char* filename,
			int frameNo,
			int frames_processed,
			int endoPoints,
			int* input_a,
			int* input_b,
			int epiPoints,
			int* input_2a,
			int* input_2b){

	//================================================================================80
	//	VARIABLES
	//================================================================================80

	FILE* fid;
	int i, j;

	//================================================================================80
	//	OPEN FILE FOR WRITING
	//================================================================================80

	fid = fopen(filename, "w+");
	if( fid == NULL ){
		printf( "The file was not opened for writing\n" );
		return;
	}

	//================================================================================80
	//	WRITE VALUES TO THE FILE
	//================================================================================80

	fprintf(fid, "Total AVI Frames: %d\n", frameNo);
	fprintf(fid, "Frames Processed: %d\n", frames_processed);
	fprintf(fid, "endoPoints: %d\n", endoPoints);
	fprintf(fid, "epiPoints: %d", epiPoints);

	for(j=0; j<frames_processed; j++){

		fprintf(fid, "\n---Frame %d---", j);

		// endo rows, then endo cols
		fprintf(fid, "\n--endo--\n");
		for(i=0; i<endoPoints; i++){
			fprintf(fid, "%d\t", input_a[j+i*frameNo]);
		}
		fprintf(fid, "\n");
		for(i=0; i<endoPoints; i++){
			// if(input_b[j*size+i] > 2000) input_b[j*size+i]=0;
			fprintf(fid, "%d\t", input_b[j+i*frameNo]);
		}

		// epi rows, then epi cols
		fprintf(fid, "\n--epi--\n");
		for(i=0; i<epiPoints; i++){
			//if(input_2a[j*size_2+i] > 2000) input_2a[j*size_2+i]=0;
			fprintf(fid, "%d\t", input_2a[j+i*frameNo]);
		}
		fprintf(fid, "\n");
		for(i=0; i<epiPoints; i++){
			//if(input_2b[j*size_2+i] > 2000) input_2b[j*size_2+i]=0;
			fprintf(fid, "%d\t", input_2b[j+i*frameNo]);
		}

	}

	// ================================================================================80
	//	CLOSE FILE
	// ================================================================================80

	fclose(fid);

}
//=============================================================================================================================================================================================================== //=============================================================================================================================================================================================================== int main(int argc, char *argv []){ //====================================================================================================================================================== // VARIABLES //====================================================================================================================================================== // counters int i; int frames_processed; // parameters public_struct public; private_struct private[ALL_POINTS]; //====================================================================================================================================================== // FRAMES //====================================================================================================================================================== if(argc!=4){ printf("ERROR: usage: heartwall <inputfile> <num of frames> <num of threads>\n"); exit(1); } char* video_file_name; video_file_name = argv[1]; avi_t* d_frames = (avi_t*)AVI_open_input_file(video_file_name, 1); // added casting if (d_frames == NULL) { AVI_print_error((char *) "Error with AVI_open_input_file"); return -1; } public.d_frames = d_frames; public.frames = AVI_video_frames(public.d_frames); public.frame_rows = AVI_video_height(public.d_frames); public.frame_cols = AVI_video_width(public.d_frames); public.frame_elem = public.frame_rows * public.frame_cols; public.frame_mem = sizeof(fp) * public.frame_elem; //====================================================================================================================================================== // CHECK 
INPUT ARGUMENTS //====================================================================================================================================================== frames_processed = atoi(argv[2]); if(frames_processed<0 || frames_processed>public.frames){ printf("ERROR: %d is an incorrect number of frames specified, select in the range of 0-%d\n", frames_processed, public.frames); return 0; } int omp_num_threads; omp_num_threads = atoi(argv[3]); if (omp_num_threads <=0){ printf ("num of threads must be a positive integer"); return 0; } printf("num of threads: %d\n", omp_num_threads); //====================================================================================================================================================== // INPUTS //====================================================================================================================================================== //==================================================================================================== // ENDO POINTS //==================================================================================================== public.endoPoints = ENDO_POINTS; public.d_endo_mem = sizeof(int) * public.endoPoints; public.d_endoRow = (int *)malloc(public.d_endo_mem); public.d_endoRow[ 0] = 369; public.d_endoRow[ 1] = 400; public.d_endoRow[ 2] = 429; public.d_endoRow[ 3] = 452; public.d_endoRow[ 4] = 476; public.d_endoRow[ 5] = 486; public.d_endoRow[ 6] = 479; public.d_endoRow[ 7] = 458; public.d_endoRow[ 8] = 433; public.d_endoRow[ 9] = 404; public.d_endoRow[10] = 374; public.d_endoRow[11] = 346; public.d_endoRow[12] = 318; public.d_endoRow[13] = 294; public.d_endoRow[14] = 277; public.d_endoRow[15] = 269; public.d_endoRow[16] = 275; public.d_endoRow[17] = 287; public.d_endoRow[18] = 311; public.d_endoRow[19] = 339; public.d_endoCol = (int *)malloc(public.d_endo_mem); public.d_endoCol[ 0] = 408; public.d_endoCol[ 1] = 406; public.d_endoCol[ 2] = 397; public.d_endoCol[ 3] = 383; 
public.d_endoCol[ 4] = 354; public.d_endoCol[ 5] = 322; public.d_endoCol[ 6] = 294; public.d_endoCol[ 7] = 270; public.d_endoCol[ 8] = 250; public.d_endoCol[ 9] = 237; public.d_endoCol[10] = 235; public.d_endoCol[11] = 241; public.d_endoCol[12] = 254; public.d_endoCol[13] = 273; public.d_endoCol[14] = 300; public.d_endoCol[15] = 328; public.d_endoCol[16] = 356; public.d_endoCol[17] = 383; public.d_endoCol[18] = 401; public.d_endoCol[19] = 411; public.d_tEndoRowLoc = (int *)malloc(public.d_endo_mem * public.frames); public.d_tEndoColLoc = (int *)malloc(public.d_endo_mem * public.frames); //==================================================================================================== // EPI POINTS //==================================================================================================== public.epiPoints = EPI_POINTS; public.d_epi_mem = sizeof(int) * public.epiPoints; public.d_epiRow = (int *)malloc(public.d_epi_mem); public.d_epiRow[ 0] = 390; public.d_epiRow[ 1] = 419; public.d_epiRow[ 2] = 448; public.d_epiRow[ 3] = 474; public.d_epiRow[ 4] = 501; public.d_epiRow[ 5] = 519; public.d_epiRow[ 6] = 535; public.d_epiRow[ 7] = 542; public.d_epiRow[ 8] = 543; public.d_epiRow[ 9] = 538; public.d_epiRow[10] = 528; public.d_epiRow[11] = 511; public.d_epiRow[12] = 491; public.d_epiRow[13] = 466; public.d_epiRow[14] = 438; public.d_epiRow[15] = 406; public.d_epiRow[16] = 376; public.d_epiRow[17] = 347; public.d_epiRow[18] = 318; public.d_epiRow[19] = 291; public.d_epiRow[20] = 275; public.d_epiRow[21] = 259; public.d_epiRow[22] = 256; public.d_epiRow[23] = 252; public.d_epiRow[24] = 252; public.d_epiRow[25] = 257; public.d_epiRow[26] = 266; public.d_epiRow[27] = 283; public.d_epiRow[28] = 305; public.d_epiRow[29] = 331; public.d_epiRow[30] = 360; public.d_epiCol = (int *)malloc(public.d_epi_mem); public.d_epiCol[ 0] = 457; public.d_epiCol[ 1] = 454; public.d_epiCol[ 2] = 446; public.d_epiCol[ 3] = 431; public.d_epiCol[ 4] = 411; public.d_epiCol[ 5] = 388; 
public.d_epiCol[ 6] = 361; public.d_epiCol[ 7] = 331; public.d_epiCol[ 8] = 301; public.d_epiCol[ 9] = 273; public.d_epiCol[10] = 243; public.d_epiCol[11] = 218; public.d_epiCol[12] = 196; public.d_epiCol[13] = 178; public.d_epiCol[14] = 166; public.d_epiCol[15] = 157; public.d_epiCol[16] = 155; public.d_epiCol[17] = 165; public.d_epiCol[18] = 177; public.d_epiCol[19] = 197; public.d_epiCol[20] = 218; public.d_epiCol[21] = 248; public.d_epiCol[22] = 276; public.d_epiCol[23] = 304; public.d_epiCol[24] = 333; public.d_epiCol[25] = 361; public.d_epiCol[26] = 391; public.d_epiCol[27] = 415; public.d_epiCol[28] = 434; public.d_epiCol[29] = 448; public.d_epiCol[30] = 455; public.d_tEpiRowLoc = (int *)malloc(public.d_epi_mem * public.frames); public.d_tEpiColLoc = (int *)malloc(public.d_epi_mem * public.frames); //==================================================================================================== // ALL POINTS //==================================================================================================== public.allPoints = ALL_POINTS; //====================================================================================================================================================== // CONSTANTS //====================================================================================================================================================== public.tSize = 25; public.sSize = 40; public.maxMove = 10; public.alpha = 0.87; //====================================================================================================================================================== // SUMS //====================================================================================================================================================== for(i=0; i<public.allPoints; i++){ private[i].in_partial_sum = (fp *)malloc(sizeof(fp) * 2*public.tSize+1); private[i].in_sqr_partial_sum = (fp *)malloc(sizeof(fp) * 2*public.tSize+1); private[i].par_max_val = (fp 
*)malloc(sizeof(fp) * (2*public.tSize+2*public.sSize+1)); private[i].par_max_coo = (int *)malloc(sizeof(int) * (2*public.tSize+2*public.sSize+1)); } //====================================================================================================================================================== // INPUT 2 (SAMPLE AROUND POINT) //====================================================================================================================================================== public.in2_rows = 2 * public.sSize + 1; public.in2_cols = 2 * public.sSize + 1; public.in2_elem = public.in2_rows * public.in2_cols; public.in2_mem = sizeof(fp) * public.in2_elem; for(i=0; i<public.allPoints; i++){ private[i].d_in2 = (fp *)malloc(public.in2_mem); private[i].d_in2_sqr = (fp *)malloc(public.in2_mem); } //====================================================================================================================================================== // INPUT (POINT TEMPLATE) //====================================================================================================================================================== public.in_mod_rows = public.tSize+1+public.tSize; public.in_mod_cols = public.in_mod_rows; public.in_mod_elem = public.in_mod_rows * public.in_mod_cols; public.in_mod_mem = sizeof(fp) * public.in_mod_elem; for(i=0; i<public.allPoints; i++){ private[i].d_in_mod = (fp *)malloc(public.in_mod_mem); private[i].d_in_sqr = (fp *)malloc(public.in_mod_mem); } //====================================================================================================================================================== // ARRAY OF TEMPLATES FOR ALL POINTS //====================================================================================================================================================== public.d_endoT = (fp *)malloc(public.in_mod_mem * public.endoPoints); public.d_epiT = (fp *)malloc(public.in_mod_mem * public.epiPoints); 
//====================================================================================================================================================== // SETUP private POINTERS TO ROWS, COLS AND TEMPLATE //====================================================================================================================================================== for(i=0; i<public.endoPoints; i++){ private[i].point_no = i; private[i].in_pointer = private[i].point_no * public.in_mod_elem; private[i].d_Row = public.d_endoRow; // original row coordinates private[i].d_Col = public.d_endoCol; // original col coordinates private[i].d_tRowLoc = public.d_tEndoRowLoc; // updated row coordinates private[i].d_tColLoc = public.d_tEndoColLoc; // updated row coordinates private[i].d_T = public.d_endoT; // templates } #pragma omp parallel for for(i=public.endoPoints; i<public.allPoints; i++){ private[i].point_no = i-public.endoPoints; private[i].in_pointer = private[i].point_no * public.in_mod_elem; private[i].d_Row = public.d_epiRow; private[i].d_Col = public.d_epiCol; private[i].d_tRowLoc = public.d_tEpiRowLoc; private[i].d_tColLoc = public.d_tEpiColLoc; private[i].d_T = public.d_epiT; } //====================================================================================================================================================== // CONVOLUTION //====================================================================================================================================================== public.ioffset = 0; public.joffset = 0; public.conv_rows = public.in_mod_rows + public.in2_rows - 1; // number of rows in I public.conv_cols = public.in_mod_cols + public.in2_cols - 1; // number of columns in I public.conv_elem = public.conv_rows * public.conv_cols; // number of elements public.conv_mem = sizeof(fp) * public.conv_elem; #pragma omp parallel for for(i=0; i<public.allPoints; i++){ private[i].d_conv = (fp *)malloc(public.conv_mem); } 
//====================================================================================================================================================== // CUMULATIVE SUM //====================================================================================================================================================== //==================================================================================================== // PAD ARRAY //==================================================================================================== //==================================================================================================== // VERTICAL CUMULATIVE SUM //==================================================================================================== public.in2_pad_add_rows = public.in_mod_rows; public.in2_pad_add_cols = public.in_mod_cols; public.in2_pad_rows = public.in2_rows + 2*public.in2_pad_add_rows; public.in2_pad_cols = public.in2_cols + 2*public.in2_pad_add_cols; public.in2_pad_elem = public.in2_pad_rows * public.in2_pad_cols; public.in2_pad_mem = sizeof(fp) * public.in2_pad_elem; #pragma omp parallel for for(i=0; i<public.allPoints; i++){ private[i].d_in2_pad = (fp *)malloc(public.in2_pad_mem); } //==================================================================================================== // SELECTION, SELECTION 2, SUBTRACTION //==================================================================================================== //==================================================================================================== // HORIZONTAL CUMULATIVE SUM //==================================================================================================== public.in2_pad_cumv_sel_rowlow = 1 + public.in_mod_rows; // (1 to n+1) public.in2_pad_cumv_sel_rowhig = public.in2_pad_rows - 1; public.in2_pad_cumv_sel_collow = 1; public.in2_pad_cumv_sel_colhig = public.in2_pad_cols; public.in2_pad_cumv_sel2_rowlow = 1; 
public.in2_pad_cumv_sel2_rowhig = public.in2_pad_rows - public.in_mod_rows - 1; public.in2_pad_cumv_sel2_collow = 1; public.in2_pad_cumv_sel2_colhig = public.in2_pad_cols; public.in2_sub_rows = public.in2_pad_cumv_sel_rowhig - public.in2_pad_cumv_sel_rowlow + 1; public.in2_sub_cols = public.in2_pad_cumv_sel_colhig - public.in2_pad_cumv_sel_collow + 1; public.in2_sub_elem = public.in2_sub_rows * public.in2_sub_cols; public.in2_sub_mem = sizeof(fp) * public.in2_sub_elem; for(i=0; i<public.allPoints; i++){ private[i].d_in2_sub = (fp *)malloc(public.in2_sub_mem); } //==================================================================================================== // SELECTION, SELECTION 2, SUBTRACTION, SQUARE, NUMERATOR //==================================================================================================== public.in2_sub_cumh_sel_rowlow = 1; public.in2_sub_cumh_sel_rowhig = public.in2_sub_rows; public.in2_sub_cumh_sel_collow = 1 + public.in_mod_cols; public.in2_sub_cumh_sel_colhig = public.in2_sub_cols - 1; public.in2_sub_cumh_sel2_rowlow = 1; public.in2_sub_cumh_sel2_rowhig = public.in2_sub_rows; public.in2_sub_cumh_sel2_collow = 1; public.in2_sub_cumh_sel2_colhig = public.in2_sub_cols - public.in_mod_cols - 1; public.in2_sub2_sqr_rows = public.in2_sub_cumh_sel_rowhig - public.in2_sub_cumh_sel_rowlow + 1; public.in2_sub2_sqr_cols = public.in2_sub_cumh_sel_colhig - public.in2_sub_cumh_sel_collow + 1; public.in2_sub2_sqr_elem = public.in2_sub2_sqr_rows * public.in2_sub2_sqr_cols; public.in2_sub2_sqr_mem = sizeof(fp) * public.in2_sub2_sqr_elem; for(i=0; i<public.allPoints; i++){ private[i].d_in2_sub2_sqr = (fp *)malloc(public.in2_sub2_sqr_mem); } //====================================================================================================================================================== // CUMULATIVE SUM 2 
//====================================================================================================================================================== //==================================================================================================== // PAD ARRAY //==================================================================================================== //==================================================================================================== // VERTICAL CUMULATIVE SUM //==================================================================================================== //==================================================================================================== // SELECTION, SELECTION 2, SUBTRACTION //==================================================================================================== //==================================================================================================== // HORIZONTAL CUMULATIVE SUM //==================================================================================================== //==================================================================================================== // SELECTION, SELECTION 2, SUBTRACTION, DIFFERENTIAL LOCAL SUM, DENOMINATOR A, DENOMINATOR, CORRELATION //==================================================================================================== //====================================================================================================================================================== // TEMPLATE MASK CREATE //====================================================================================================================================================== public.tMask_rows = public.in_mod_rows + (public.sSize+1+public.sSize) - 1; public.tMask_cols = public.tMask_rows; public.tMask_elem = public.tMask_rows * public.tMask_cols; public.tMask_mem = sizeof(fp) * public.tMask_elem; #pragma omp parallel for for(i=0; 
i<public.allPoints; i++){ private[i].d_tMask = (fp *)malloc(public.tMask_mem); } //====================================================================================================================================================== // POINT MASK INITIALIZE //====================================================================================================================================================== public.mask_rows = public.maxMove; public.mask_cols = public.mask_rows; public.mask_elem = public.mask_rows * public.mask_cols; public.mask_mem = sizeof(fp) * public.mask_elem; //====================================================================================================================================================== // MASK CONVOLUTION //====================================================================================================================================================== public.mask_conv_rows = public.tMask_rows; // number of rows in I public.mask_conv_cols = public.tMask_cols; // number of columns in I public.mask_conv_elem = public.mask_conv_rows * public.mask_conv_cols; // number of elements public.mask_conv_mem = sizeof(fp) * public.mask_conv_elem; public.mask_conv_ioffset = (public.mask_rows-1)/2; if((public.mask_rows-1) % 2 > 0.5){ public.mask_conv_ioffset = public.mask_conv_ioffset + 1; } public.mask_conv_joffset = (public.mask_cols-1)/2; if((public.mask_cols-1) % 2 > 0.5){ public.mask_conv_joffset = public.mask_conv_joffset + 1; } #pragma omp parallel for for(i=0; i<public.allPoints; i++){ private[i].d_mask_conv = (fp *)malloc(public.mask_conv_mem); } //====================================================================================================================================================== // PRINT FRAME PROGRESS START //====================================================================================================================================================== printf("frame progress: "); fflush(NULL); 
//====================================================================================================================================================== // KERNEL //====================================================================================================================================================== for(public.frame_no=0; public.frame_no<frames_processed; public.frame_no++){ //==================================================================================================== // GETTING FRAME //==================================================================================================== // Extract a cropped version of the first frame from the video file public.d_frame = get_frame(public.d_frames, // pointer to video file public.frame_no, // number of frame that needs to be returned 0, // cropped? 0, // scaled? 1); // converted //==================================================================================================== // PROCESSING //==================================================================================================== omp_set_num_threads(omp_num_threads); #pragma omp parallel for for(i=0; i<public.allPoints; i++){ kernel( public, private[i]); } //==================================================================================================== // FREE MEMORY FOR FRAME //==================================================================================================== // free frame after each loop iteration, since AVI library allocates memory for every frame fetched free(public.d_frame); //==================================================================================================== // PRINT FRAME PROGRESS //==================================================================================================== printf("%d ", public.frame_no); fflush(NULL); } //====================================================================================================================================================== // 
PRINT FRAME PROGRESS END //====================================================================================================================================================== printf("\n"); fflush(NULL); //====================================================================================================================================================== // DEALLOCATION //====================================================================================================================================================== //==================================================50 // DUMP DATA TO FILE //==================================================50 #ifdef OUTPUT write_data( "result.txt", public.frames, frames_processed, public.endoPoints, public.d_tEndoRowLoc, public.d_tEndoColLoc, public.epiPoints, public.d_tEpiRowLoc, public.d_tEpiColLoc); #endif //==================================================================================================== // COMMON //==================================================================================================== free(public.d_endoRow); free(public.d_endoCol); free(public.d_tEndoRowLoc); free(public.d_tEndoColLoc); free(public.d_endoT); free(public.d_epiRow); free(public.d_epiCol); free(public.d_tEpiRowLoc); free(public.d_tEpiColLoc); free(public.d_epiT); //==================================================================================================== // POINTERS //==================================================================================================== #pragma omp parallel for for(i=0; i<public.allPoints; i++){ free(private[i].in_partial_sum); free(private[i].in_sqr_partial_sum); free(private[i].par_max_val); free(private[i].par_max_coo); free(private[i].d_in2); free(private[i].d_in2_sqr); free(private[i].d_in_mod); free(private[i].d_in_sqr); free(private[i].d_conv); free(private[i].d_in2_pad); free(private[i].d_in2_sub); free(private[i].d_in2_sub2_sqr); free(private[i].d_tMask); 
free(private[i].d_mask_conv); } } //======================================================================================================================================================================================================== //======================================================================================================================================================================================================== // END OF FILE //======================================================================================================================================================================================================== //========================================================================================================================================================================================================
callback.h
#ifndef _BSD_SOURCE #define _BSD_SOURCE #endif #define _DEFAULT_SOURCE #include <stdio.h> #ifndef __STDC_FORMAT_MACROS #define __STDC_FORMAT_MACROS #endif #include <inttypes.h> #include <omp.h> #include <omp-tools.h> #include "ompt-signal.h" // Used to detect architecture #include "../../src/kmp_platform.h" static const char* ompt_thread_t_values[] = { NULL, "ompt_thread_initial", "ompt_thread_worker", "ompt_thread_other" }; static const char* ompt_task_status_t_values[] = { NULL, "ompt_task_complete", // 1 "ompt_task_yield", // 2 "ompt_task_cancel", // 3 "ompt_task_detach", // 4 "ompt_task_early_fulfill", // 5 "ompt_task_late_fulfill", // 6 "ompt_task_switch" // 7 }; static const char* ompt_cancel_flag_t_values[] = { "ompt_cancel_parallel", "ompt_cancel_sections", "ompt_cancel_loop", "ompt_cancel_taskgroup", "ompt_cancel_activated", "ompt_cancel_detected", "ompt_cancel_discarded_task" }; static void format_task_type(int type, char *buffer) { char *progress = buffer; if (type & ompt_task_initial) progress += sprintf(progress, "ompt_task_initial"); if (type & ompt_task_implicit) progress += sprintf(progress, "ompt_task_implicit"); if (type & ompt_task_explicit) progress += sprintf(progress, "ompt_task_explicit"); if (type & ompt_task_target) progress += sprintf(progress, "ompt_task_target"); if (type & ompt_task_undeferred) progress += sprintf(progress, "|ompt_task_undeferred"); if (type & ompt_task_untied) progress += sprintf(progress, "|ompt_task_untied"); if (type & ompt_task_final) progress += sprintf(progress, "|ompt_task_final"); if (type & ompt_task_mergeable) progress += sprintf(progress, "|ompt_task_mergeable"); if (type & ompt_task_merged) progress += sprintf(progress, "|ompt_task_merged"); } static ompt_set_callback_t ompt_set_callback; static ompt_get_callback_t ompt_get_callback; static ompt_get_state_t ompt_get_state; static ompt_get_task_info_t ompt_get_task_info; static ompt_get_task_memory_t ompt_get_task_memory; static ompt_get_thread_data_t 
ompt_get_thread_data; static ompt_get_parallel_info_t ompt_get_parallel_info; static ompt_get_unique_id_t ompt_get_unique_id; static ompt_finalize_tool_t ompt_finalize_tool; static ompt_get_num_procs_t ompt_get_num_procs; static ompt_get_num_places_t ompt_get_num_places; static ompt_get_place_proc_ids_t ompt_get_place_proc_ids; static ompt_get_place_num_t ompt_get_place_num; static ompt_get_partition_place_nums_t ompt_get_partition_place_nums; static ompt_get_proc_id_t ompt_get_proc_id; static ompt_enumerate_states_t ompt_enumerate_states; static ompt_enumerate_mutex_impls_t ompt_enumerate_mutex_impls; static void print_ids(int level) { int task_type, thread_num; ompt_frame_t *frame; ompt_data_t *task_parallel_data; ompt_data_t *task_data; int exists_task = ompt_get_task_info(level, &task_type, &task_data, &frame, &task_parallel_data, &thread_num); char buffer[2048]; format_task_type(task_type, buffer); if (frame) printf("%" PRIu64 ": task level %d: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", exit_frame=%p, reenter_frame=%p, " "task_type=%s=%d, thread_num=%d\n", ompt_get_thread_data()->value, level, exists_task ? task_parallel_data->value : 0, exists_task ? 
task_data->value : 0, frame->exit_frame.ptr, frame->enter_frame.ptr, buffer, task_type, thread_num); } #define get_frame_address(level) __builtin_frame_address(level) #define print_frame(level) \ printf("%" PRIu64 ": __builtin_frame_address(%d)=%p\n", \ ompt_get_thread_data()->value, level, get_frame_address(level)) // clang (version 5.0 and above) adds an intermediate function call with debug flag (-g) #if defined(TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN) #if defined(DEBUG) && defined(__clang__) && __clang_major__ >= 5 #define print_frame_from_outlined_fn(level) print_frame(level+1) #else #define print_frame_from_outlined_fn(level) print_frame(level) #endif #if defined(__clang__) && __clang_major__ >= 5 #warning "Clang 5.0 and later add an additional wrapper for outlined functions when compiling with debug information." #warning "Please define -DDEBUG iff you manually pass in -g to make the tests succeed!" #endif #endif // This macro helps to define a label at the current position that can be used // to get the current address in the code. // // For print_current_address(): // To reliably determine the offset between the address of the label and the // actual return address, we insert a NOP instruction as a jump target as the // compiler would otherwise insert an instruction that we can't control. The // instruction length is target dependent and is explained below. // // (The empty block between "#pragma omp ..." and the __asm__ statement is a // workaround for a bug in the Intel Compiler.) #define define_ompt_label(id) \ {} \ __asm__("nop"); \ ompt_label_##id: // This macro helps to get the address of a label that is inserted by the above // macro define_ompt_label(). The address is obtained with a GNU extension // (&&label) that has been tested with gcc, clang and icc. #define get_ompt_label_address(id) (&& ompt_label_##id) // This macro prints the exact address that a previously called runtime function // returns to. 
#define print_current_address(id) \
  define_ompt_label(id) \
  print_possible_return_addresses(get_ompt_label_address(id))

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// On X86 the NOP instruction is 1 byte long. In addition, the compiler inserts
// a MOV instruction for non-void runtime functions which is 3 bytes long.
#define print_possible_return_addresses(addr) \
  printf("%" PRIu64 ": current_address=%p or %p for non-void functions\n", \
         ompt_get_thread_data()->value, ((char *)addr) - 1, ((char *)addr) - 4)
#elif KMP_ARCH_PPC64
// On Power the NOP instruction is 4 bytes long. In addition, the compiler
// inserts a second NOP instruction (another 4 bytes). For non-void runtime
// functions Clang inserts a STW instruction (but only if compiling under
// -fno-PIC which will be the default with Clang 8.0, another 4 bytes).
#define print_possible_return_addresses(addr) \
  printf("%" PRIu64 ": current_address=%p or %p\n", ompt_get_thread_data()->value, \
         ((char *)addr) - 8, ((char *)addr) - 12)
#elif KMP_ARCH_AARCH64
// On AArch64 the NOP instruction is 4 bytes long, can be followed by inserted
// store instruction (another 4 bytes long).
#define print_possible_return_addresses(addr) \
  printf("%" PRIu64 ": current_address=%p or %p\n", ompt_get_thread_data()->value, \
         ((char *)addr) - 4, ((char *)addr) - 8)
#elif KMP_ARCH_RISCV64
#if __riscv_compressed
// On RV64GC the C.NOP instruction is 2 byte long. In addition, the compiler
// inserts a J instruction (targeting the successor basic block), which
// accounts for another 4 bytes. Finally, an additional J instruction may
// appear (adding 4 more bytes) when the C.NOP is referenced elsewhere (ie.
// another branch).
#define print_possible_return_addresses(addr) \
  printf("%" PRIu64 ": current_address=%p or %p\n", \
         ompt_get_thread_data()->value, ((char *)addr) - 6, ((char *)addr) - 10)
#else
// On RV64G the NOP instruction is 4 byte long. In addition, the compiler
// inserts a J instruction (targeting the successor basic block), which
// accounts for another 4 bytes. Finally, an additional J instruction may
// appear (adding 4 more bytes) when the NOP is referenced elsewhere (ie.
// another branch).
#define print_possible_return_addresses(addr) \
  printf("%" PRIu64 ": current_address=%p or %p\n", \
         ompt_get_thread_data()->value, ((char *)addr) - 8, ((char *)addr) - 12)
#endif
#else
#error Unsupported target architecture, cannot determine address offset!
#endif

// This macro performs a somewhat similar job to print_current_address(), except
// that it discards a certain number of nibbles from the address and only prints
// the most significant bits / nibbles. This can be used for cases where the
// return address can only be approximated.
//
// To account for overflows (ie the most significant bits / nibbles have just
// changed as we are a few bytes above the relevant power of two) the addresses
// of the "current" and of the "previous block" are printed.
#define print_fuzzy_address(id) \
  define_ompt_label(id) \
  print_fuzzy_address_blocks(get_ompt_label_address(id))

// If you change this define you need to adapt all capture patterns in the tests
// to include or discard the new number of nibbles!
#define FUZZY_ADDRESS_DISCARD_NIBBLES 2 #define FUZZY_ADDRESS_DISCARD_BYTES (1 << ((FUZZY_ADDRESS_DISCARD_NIBBLES) * 4)) #define print_fuzzy_address_blocks(addr) \ printf("%" PRIu64 ": fuzzy_address=0x%" PRIx64 " or 0x%" PRIx64 \ " or 0x%" PRIx64 " or 0x%" PRIx64 " (%p)\n", \ ompt_get_thread_data()->value, \ ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES - 1, \ ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES, \ ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES + 1, \ ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES + 2, addr) #define register_callback_t(name, type) \ do { \ type f_##name = &on_##name; \ if (ompt_set_callback(name, (ompt_callback_t)f_##name) == ompt_set_never) \ printf("0: Could not register callback '" #name "'\n"); \ } while (0) #define register_callback(name) register_callback_t(name, name##_t) #ifndef USE_PRIVATE_TOOL static void on_ompt_callback_mutex_acquire( ompt_mutex_t kind, unsigned int hint, unsigned int impl, ompt_wait_id_t wait_id, const void *codeptr_ra) { switch(kind) { case ompt_mutex_lock: printf("%" PRIu64 ": ompt_event_wait_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; case ompt_mutex_nest_lock: printf("%" PRIu64 ": ompt_event_wait_nest_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; case ompt_mutex_critical: printf("%" PRIu64 ": ompt_event_wait_critical: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; case ompt_mutex_atomic: printf("%" PRIu64 ": ompt_event_wait_atomic: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; case ompt_mutex_ordered: printf("%" PRIu64 ": ompt_event_wait_ordered: wait_id=%" PRIu64 ", hint=%" PRIu32 ", 
impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; default: break; } } static void on_ompt_callback_mutex_acquired( ompt_mutex_t kind, ompt_wait_id_t wait_id, const void *codeptr_ra) { switch(kind) { case ompt_mutex_lock: printf("%" PRIu64 ": ompt_event_acquired_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_nest_lock: printf("%" PRIu64 ": ompt_event_acquired_nest_lock_first: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_critical: printf("%" PRIu64 ": ompt_event_acquired_critical: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_atomic: printf("%" PRIu64 ": ompt_event_acquired_atomic: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_ordered: printf("%" PRIu64 ": ompt_event_acquired_ordered: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; default: break; } } static void on_ompt_callback_mutex_released( ompt_mutex_t kind, ompt_wait_id_t wait_id, const void *codeptr_ra) { switch(kind) { case ompt_mutex_lock: printf("%" PRIu64 ": ompt_event_release_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_nest_lock: printf("%" PRIu64 ": ompt_event_release_nest_lock_last: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_critical: printf("%" PRIu64 ": ompt_event_release_critical: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_atomic: printf("%" PRIu64 ": ompt_event_release_atomic: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case 
ompt_mutex_ordered: printf("%" PRIu64 ": ompt_event_release_ordered: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; default: break; } } static void on_ompt_callback_nest_lock( ompt_scope_endpoint_t endpoint, ompt_wait_id_t wait_id, const void *codeptr_ra) { switch(endpoint) { case ompt_scope_begin: printf("%" PRIu64 ": ompt_event_acquired_nest_lock_next: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_scope_end: printf("%" PRIu64 ": ompt_event_release_nest_lock_prev: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; } } static void on_ompt_callback_sync_region( ompt_sync_region_t kind, ompt_scope_endpoint_t endpoint, ompt_data_t *parallel_data, ompt_data_t *task_data, const void *codeptr_ra) { switch(endpoint) { case ompt_scope_begin: switch(kind) { case ompt_sync_region_barrier: case ompt_sync_region_barrier_implicit: case ompt_sync_region_barrier_explicit: case ompt_sync_region_barrier_implementation: printf("%" PRIu64 ": ompt_event_barrier_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); print_ids(0); break; case ompt_sync_region_taskwait: printf("%" PRIu64 ": ompt_event_taskwait_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; case ompt_sync_region_taskgroup: printf("%" PRIu64 ": ompt_event_taskgroup_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; case ompt_sync_region_reduction: break; } break; case ompt_scope_end: switch(kind) { case ompt_sync_region_barrier: case ompt_sync_region_barrier_implicit: case ompt_sync_region_barrier_explicit: case 
ompt_sync_region_barrier_implementation: printf("%" PRIu64 ": ompt_event_barrier_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra); break; case ompt_sync_region_taskwait: printf("%" PRIu64 ": ompt_event_taskwait_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra); break; case ompt_sync_region_taskgroup: printf("%" PRIu64 ": ompt_event_taskgroup_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra); break; case ompt_sync_region_reduction: break; } break; } } static void on_ompt_callback_sync_region_wait( ompt_sync_region_t kind, ompt_scope_endpoint_t endpoint, ompt_data_t *parallel_data, ompt_data_t *task_data, const void *codeptr_ra) { switch(endpoint) { case ompt_scope_begin: switch(kind) { case ompt_sync_region_barrier: case ompt_sync_region_barrier_implicit: case ompt_sync_region_barrier_explicit: case ompt_sync_region_barrier_implementation: printf("%" PRIu64 ": ompt_event_wait_barrier_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; case ompt_sync_region_taskwait: printf("%" PRIu64 ": ompt_event_wait_taskwait_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; case ompt_sync_region_taskgroup: printf("%" PRIu64 ": ompt_event_wait_taskgroup_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra); break; case ompt_sync_region_reduction: break; } break; case ompt_scope_end: switch(kind) { case 
ompt_sync_region_barrier: case ompt_sync_region_barrier_implicit: case ompt_sync_region_barrier_explicit: case ompt_sync_region_barrier_implementation: printf("%" PRIu64 ": ompt_event_wait_barrier_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra); break; case ompt_sync_region_taskwait: printf("%" PRIu64 ": ompt_event_wait_taskwait_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra); break; case ompt_sync_region_taskgroup: printf("%" PRIu64 ": ompt_event_wait_taskgroup_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra); break; case ompt_sync_region_reduction: break; } break; } } static void on_ompt_callback_flush( ompt_data_t *thread_data, const void *codeptr_ra) { printf("%" PRIu64 ": ompt_event_flush: codeptr_ra=%p\n", thread_data->value, codeptr_ra); } static void on_ompt_callback_cancel( ompt_data_t *task_data, int flags, const void *codeptr_ra) { const char* first_flag_value; const char* second_flag_value; if(flags & ompt_cancel_parallel) first_flag_value = ompt_cancel_flag_t_values[0]; else if(flags & ompt_cancel_sections) first_flag_value = ompt_cancel_flag_t_values[1]; else if(flags & ompt_cancel_loop) first_flag_value = ompt_cancel_flag_t_values[2]; else if(flags & ompt_cancel_taskgroup) first_flag_value = ompt_cancel_flag_t_values[3]; if(flags & ompt_cancel_activated) second_flag_value = ompt_cancel_flag_t_values[4]; else if(flags & ompt_cancel_detected) second_flag_value = ompt_cancel_flag_t_values[5]; else if(flags & ompt_cancel_discarded_task) second_flag_value = ompt_cancel_flag_t_values[6]; printf("%" PRIu64 ": ompt_event_cancel: task_data=%" PRIu64 ", flags=%s|%s=%" PRIu32 ", codeptr_ra=%p\n", 
ompt_get_thread_data()->value, task_data->value, first_flag_value, second_flag_value, flags, codeptr_ra); } static void on_ompt_callback_implicit_task( ompt_scope_endpoint_t endpoint, ompt_data_t *parallel_data, ompt_data_t *task_data, unsigned int team_size, unsigned int thread_num, int flags) { switch(endpoint) { case ompt_scope_begin: if(task_data->ptr) printf("%s\n", "0: task_data initially not null"); task_data->value = ompt_get_unique_id(); //there is no parallel_begin callback for implicit parallel region //thus it is initialized in initial task if(flags & ompt_task_initial) { char buffer[2048]; format_task_type(flags, buffer); if(parallel_data->ptr) printf("%s\n", "0: parallel_data initially not null"); parallel_data->value = ompt_get_unique_id(); printf("%" PRIu64 ": ompt_event_initial_task_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", actual_parallelism=%" PRIu32 ", index=%" PRIu32 ", flags=%" PRIu32 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, team_size, thread_num, flags); } else { printf("%" PRIu64 ": ompt_event_implicit_task_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", team_size=%" PRIu32 ", thread_num=%" PRIu32 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, team_size, thread_num); } break; case ompt_scope_end: if(flags & ompt_task_initial){ printf("%" PRIu64 ": ompt_event_initial_task_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", team_size=%" PRIu32 ", thread_num=%" PRIu32 "\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, team_size, thread_num); } else { printf("%" PRIu64 ": ompt_event_implicit_task_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", team_size=%" PRIu32 ", thread_num=%" PRIu32 "\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, team_size, thread_num); } break; } } static void on_ompt_callback_lock_init( ompt_mutex_t kind, unsigned int hint, unsigned int impl, 
ompt_wait_id_t wait_id, const void *codeptr_ra) { switch(kind) { case ompt_mutex_lock: printf("%" PRIu64 ": ompt_event_init_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; case ompt_mutex_nest_lock: printf("%" PRIu64 ": ompt_event_init_nest_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra); break; default: break; } } static void on_ompt_callback_lock_destroy( ompt_mutex_t kind, ompt_wait_id_t wait_id, const void *codeptr_ra) { switch(kind) { case ompt_mutex_lock: printf("%" PRIu64 ": ompt_event_destroy_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; case ompt_mutex_nest_lock: printf("%" PRIu64 ": ompt_event_destroy_nest_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra); break; default: break; } } static void on_ompt_callback_work( ompt_work_t wstype, ompt_scope_endpoint_t endpoint, ompt_data_t *parallel_data, ompt_data_t *task_data, uint64_t count, const void *codeptr_ra) { switch(endpoint) { case ompt_scope_begin: switch(wstype) { case ompt_work_loop: printf("%" PRIu64 ": ompt_event_loop_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_sections: printf("%" PRIu64 ": ompt_event_sections_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_single_executor: printf("%" PRIu64 ": ompt_event_single_in_block_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, 
task_data->value, codeptr_ra, count); break; case ompt_work_single_other: printf("%" PRIu64 ": ompt_event_single_others_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_workshare: //impl break; case ompt_work_distribute: printf("%" PRIu64 ": ompt_event_distribute_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_taskloop: //impl printf("%" PRIu64 ": ompt_event_taskloop_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; } break; case ompt_scope_end: switch(wstype) { case ompt_work_loop: printf("%" PRIu64 ": ompt_event_loop_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_sections: printf("%" PRIu64 ": ompt_event_sections_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_single_executor: printf("%" PRIu64 ": ompt_event_single_in_block_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_single_other: printf("%" PRIu64 ": ompt_event_single_others_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count); break; case ompt_work_workshare: //impl break; case 
/* NOTE(review): this chunk begins mid-function — it is the tail of the
 * on_ompt_callback_work dispatch; the enclosing switch statements (on
 * endpoint and on the worksharing type) and the `case` keyword for the
 * label below start before this view. Each callback in this tool prints
 * one line per OMPT event so a test harness can match the trace. */
ompt_work_distribute:
      /* end of a `distribute` worksharing region */
      printf("%" PRIu64 ": ompt_event_distribute_end: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
      break;
    case ompt_work_taskloop:
      //impl: end of a `taskloop` region
      printf("%" PRIu64 ": ompt_event_taskloop_end: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
      break;
  }
  break;
  }
}

/* Trace begin/end of `master` regions (one line per endpoint). */
static void
on_ompt_callback_master(
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      printf("%" PRIu64 ": ompt_event_master_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
      break;
    case ompt_scope_end:
      printf("%" PRIu64 ": ompt_event_master_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
      break;
  }
}

/* Trace parallel-region begin: assigns a fresh unique id into the
 * tool-owned parallel_data slot (warning if the runtime handed us a
 * slot that is unexpectedly non-NULL), then prints parent task id,
 * parent frame pointers, team size and invoker flag. */
static void
on_ompt_callback_parallel_begin(
  ompt_data_t *encountering_task_data,
  const ompt_frame_t *encountering_task_frame,
  ompt_data_t *parallel_data,
  uint32_t requested_team_size,
  int flag,
  const void *codeptr_ra)
{
  if(parallel_data->ptr)
    printf("0: parallel_data initially not null\n");
  parallel_data->value = ompt_get_unique_id();
  printf("%" PRIu64 ": ompt_event_parallel_begin: parent_task_id=%" PRIu64 ", parent_task_frame.exit=%p, parent_task_frame.reenter=%p, " "parallel_id=%" PRIu64 ", requested_team_size=%" PRIu32 ", codeptr_ra=%p, invoker=%d\n", ompt_get_thread_data()->value, encountering_task_data->value, encountering_task_frame->exit_frame.ptr, encountering_task_frame->enter_frame.ptr, parallel_data->value, requested_team_size, codeptr_ra, flag);
}

/* Trace parallel-region end (id, enclosing task id, invoker flag). */
static void
on_ompt_callback_parallel_end(
  ompt_data_t *parallel_data,
  ompt_data_t *encountering_task_data,
  int flag,
  const void *codeptr_ra)
{
  printf("%" PRIu64 ": ompt_event_parallel_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", invoker=%d, codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, encountering_task_data->value, flag, codeptr_ra);
}

/* Trace task creation: assigns a fresh unique id to the new task and
 * prints parent info plus a textual task-type decoded by
 * format_task_type (helper defined earlier in this file — outside this
 * view). The encountering_* arguments are NULL-guarded in the printf
 * arguments below, so they may legitimately be NULL (e.g. for the
 * initial task — TODO confirm against the runtime). */
static void
on_ompt_callback_task_create(
    ompt_data_t *encountering_task_data,
    const ompt_frame_t *encountering_task_frame,
    ompt_data_t* new_task_data,
    int type,
    int has_dependences,
    const void *codeptr_ra)
{
  if(new_task_data->ptr)
    printf("0: new_task_data initially not null\n");
  new_task_data->value = ompt_get_unique_id();
  char buffer[2048];

  format_task_type(type, buffer);

  printf("%" PRIu64 ": ompt_event_task_create: parent_task_id=%" PRIu64 ", parent_task_frame.exit=%p, parent_task_frame.reenter=%p, new_task_id=%" PRIu64 ", codeptr_ra=%p, task_type=%s=%d, has_dependences=%s\n", ompt_get_thread_data()->value, encountering_task_data ? encountering_task_data->value : 0, encountering_task_frame ? encountering_task_frame->exit_frame.ptr : NULL, encountering_task_frame ? encountering_task_frame->enter_frame.ptr : NULL, new_task_data->value, codeptr_ra, buffer, type, has_dependences ? "yes" : "no");
}

/* Trace a task switch; when the prior task completed, also emit a
 * synthetic task_end event for it. */
static void
on_ompt_callback_task_schedule(
    ompt_data_t *first_task_data,
    ompt_task_status_t prior_task_status,
    ompt_data_t *second_task_data)
{
  printf("%" PRIu64 ": ompt_event_task_schedule: first_task_id=%" PRIu64 ", second_task_id=%" PRIu64 ", prior_task_status=%s=%d\n", ompt_get_thread_data()->value, first_task_data->value, second_task_data->value, ompt_task_status_t_values[prior_task_status], prior_task_status);
  if(prior_task_status == ompt_task_complete)
  {
    printf("%" PRIu64 ": ompt_event_task_end: task_id=%" PRIu64 "\n", ompt_get_thread_data()->value, first_task_data->value);
  }
}

/* Trace the dependence list attached to a task (pointer + count only). */
static void
on_ompt_callback_dependences(
  ompt_data_t *task_data,
  const ompt_dependence_t *deps,
  int ndeps)
{
  printf("%" PRIu64 ": ompt_event_task_dependences: task_id=%" PRIu64 ", deps=%p, ndeps=%d\n", ompt_get_thread_data()->value, task_data->value, (void *)deps, ndeps);
}

/* Trace one resolved dependence pair between two tasks. */
static void
on_ompt_callback_task_dependence(
  ompt_data_t *first_task_data,
  ompt_data_t *second_task_data)
{
  printf("%" PRIu64 ": ompt_event_task_dependence_pair: first_task_id=%" PRIu64 ", second_task_id=%" PRIu64 "\n", ompt_get_thread_data()->value, first_task_data->value, second_task_data->value);
}

/* Trace thread start: assigns a fresh unique id to the thread and prints
 * its type (initial/worker/...). */
static void
on_ompt_callback_thread_begin(
  ompt_thread_t thread_type,
  ompt_data_t *thread_data)
{
  if(thread_data->ptr)
    printf("%s\n", "0: thread_data initially not null");
  thread_data->value = ompt_get_unique_id();
  printf("%" PRIu64 ": ompt_event_thread_begin: thread_type=%s=%d, thread_id=%" PRIu64 "\n", ompt_get_thread_data()->value, ompt_thread_t_values[thread_type], thread_type, thread_data->value);
}

/* Trace thread shutdown. */
static void
on_ompt_callback_thread_end(
  ompt_data_t *thread_data)
{
  printf("%" PRIu64 ": ompt_event_thread_end: thread_id=%" PRIu64 "\n", ompt_get_thread_data()->value, thread_data->value);
}

/* omp_control_tool() hook: dumps the command/modifier plus the current
 * task's frame pointers (queried at depth 0). Always reports success. */
static int
on_ompt_callback_control_tool(
  uint64_t command,
  uint64_t modifier,
  void *arg,
  const void *codeptr_ra)
{
  ompt_frame_t* omptTaskFrame;
  ompt_get_task_info(0, NULL, (ompt_data_t**) NULL, &omptTaskFrame, NULL, NULL);
  printf("%" PRIu64 ": ompt_event_control_tool: command=%" PRIu64 ", modifier=%" PRIu64 ", arg=%p, codeptr_ra=%p, current_task_frame.exit=%p, current_task_frame.reenter=%p \n", ompt_get_thread_data()->value, command, modifier, arg, codeptr_ra, omptTaskFrame->exit_frame.ptr, omptTaskFrame->enter_frame.ptr);
  return 0; //success
}

/* Tool initializer invoked by the OpenMP runtime (via ompt_start_tool's
 * result): resolves all OMPT entry points through the lookup callback and
 * registers the event callbacks above. register_callback /
 * register_callback_t are presumably macros defined earlier in this file
 * — outside this view, TODO confirm. Returns 1 so the runtime keeps the
 * tool active. */
int ompt_initialize(
  ompt_function_lookup_t lookup,
  int initial_device_num,
  ompt_data_t *tool_data)
{
  ompt_set_callback = (ompt_set_callback_t) lookup("ompt_set_callback");
  ompt_get_callback = (ompt_get_callback_t) lookup("ompt_get_callback");
  ompt_get_state = (ompt_get_state_t) lookup("ompt_get_state");
  ompt_get_task_info = (ompt_get_task_info_t) lookup("ompt_get_task_info");
  ompt_get_task_memory = (ompt_get_task_memory_t)lookup("ompt_get_task_memory");
  ompt_get_thread_data = (ompt_get_thread_data_t) lookup("ompt_get_thread_data");
  ompt_get_parallel_info = (ompt_get_parallel_info_t) lookup("ompt_get_parallel_info");
  ompt_get_unique_id = (ompt_get_unique_id_t) lookup("ompt_get_unique_id");
  ompt_finalize_tool = (ompt_finalize_tool_t)lookup("ompt_finalize_tool");

  ompt_get_num_procs = (ompt_get_num_procs_t) lookup("ompt_get_num_procs");
  ompt_get_num_places = (ompt_get_num_places_t) lookup("ompt_get_num_places");
  ompt_get_place_proc_ids = (ompt_get_place_proc_ids_t) lookup("ompt_get_place_proc_ids");
  ompt_get_place_num = (ompt_get_place_num_t) lookup("ompt_get_place_num");
  ompt_get_partition_place_nums = (ompt_get_partition_place_nums_t) lookup("ompt_get_partition_place_nums");
  ompt_get_proc_id = (ompt_get_proc_id_t) lookup("ompt_get_proc_id");
  ompt_enumerate_states = (ompt_enumerate_states_t) lookup("ompt_enumerate_states");
  ompt_enumerate_mutex_impls = (ompt_enumerate_mutex_impls_t) lookup("ompt_enumerate_mutex_impls");

  register_callback(ompt_callback_mutex_acquire);
  register_callback_t(ompt_callback_mutex_acquired, ompt_callback_mutex_t);
  register_callback_t(ompt_callback_mutex_released, ompt_callback_mutex_t);
  register_callback(ompt_callback_nest_lock);
  register_callback(ompt_callback_sync_region);
  register_callback_t(ompt_callback_sync_region_wait, ompt_callback_sync_region_t);
  register_callback(ompt_callback_control_tool);
  register_callback(ompt_callback_flush);
  register_callback(ompt_callback_cancel);
  register_callback(ompt_callback_implicit_task);
  register_callback_t(ompt_callback_lock_init, ompt_callback_mutex_acquire_t);
  register_callback_t(ompt_callback_lock_destroy, ompt_callback_mutex_t);
  register_callback(ompt_callback_work);
  register_callback(ompt_callback_master);
  register_callback(ompt_callback_parallel_begin);
  register_callback(ompt_callback_parallel_end);
  register_callback(ompt_callback_task_create);
  register_callback(ompt_callback_task_schedule);
  register_callback(ompt_callback_dependences);
  register_callback(ompt_callback_task_dependence);
  register_callback(ompt_callback_thread_begin);
  register_callback(ompt_callback_thread_end);
  printf("0: NULL_POINTER=%p\n", (void*)NULL);
  return 1; //success
}

/* Tool finalizer: emits the runtime-shutdown marker. */
void ompt_finalize(ompt_data_t *tool_data)
{
  printf("0: ompt_event_runtime_shutdown\n");
}

#ifdef __cplusplus
extern "C" {
#endif
/* Entry point the OpenMP runtime discovers at startup; hands back the
 * initialize/finalize pair above (static storage, so the pointer stays
 * valid for the runtime's lifetime). */
ompt_start_tool_result_t* ompt_start_tool(
  unsigned int omp_version,
  const char *runtime_version)
{
  static ompt_start_tool_result_t ompt_start_tool_result = {&ompt_initialize,&ompt_finalize, 0};
  return &ompt_start_tool_result;
}
#ifdef __cplusplus
}
#endif
#endif // ifndef USE_PRIVATE_TOOL
generator_gemm_common.c
/******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved.                      *
* This file is part of the LIBXSMM library.                                   *
*                                                                             *
* For information on the license, see the LICENSE file.                       *
* Further information: https://github.com/hfp/libxsmm/                        *
* SPDX-License-Identifier: BSD-3-Clause                                       *
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include "generator_gemm_common.h"
#include "generator_common.h"
#include "generator_x86_instructions.h"
#include "libxsmm_main.h"

/**
 * Populates the micro-kernel configuration for full-vector-width GEMM code
 * generation: vector length, datatype size and the load/store, broadcast,
 * xor, mul and add instructions matching the target ISA (SSE3/4, AVX/AVX2,
 * AVX512) and the input precision of the descriptor.
 *
 * Fix vs. previous revision: in the AVX512 F32/I16/I8/BF16 aligned-C paths
 * with masking enabled, the else-branch mistakenly re-assigned
 * c_vmove_instruction instead of c_vmove_nts_instruction, leaving the
 * non-temporal store instruction unset (only zeroed by the memset). The F64
 * path already did this correctly; the others now match it. Masked stores
 * cannot be non-temporal, hence the fallback to the regular aligned move.
 *
 * @param io_micro_kernel_config configuration to populate (zeroed first)
 * @param i_arch                 target architecture id (LIBXSMM_X86_*)
 * @param i_xgemm_desc           GEMM descriptor (datatype, alignment flags)
 * @param i_use_masking_a_c      non-zero if A/C accesses are masked
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_init_micro_kernel_config_fullvector( libxsmm_micro_kernel_config* io_micro_kernel_config,
                                                                 const unsigned int             i_arch,
                                                                 const libxsmm_gemm_descriptor* i_xgemm_desc,
                                                                 const unsigned int             i_use_masking_a_c ) {
  memset(io_micro_kernel_config, 0, sizeof(*io_micro_kernel_config)); /* avoid warning "maybe used uninitialized" */
  if ( (i_arch < LIBXSMM_X86_SSE3) || (i_arch > LIBXSMM_X86_ALLFEAT) ) {
    /* unsupported architecture: mark everything undefined */
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_GENERIC;
    io_micro_kernel_config->vector_reg_count = 0;
    io_micro_kernel_config->use_masking_a_c = 0;
    io_micro_kernel_config->vector_name = 'a';
    io_micro_kernel_config->vector_length = 0;
    io_micro_kernel_config->datatype_size = 0;
    io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
  } else if ( i_arch <= LIBXSMM_X86_SSE4 ) {
    /* SSE3/SSE4: 128-bit xmm registers */
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_SSE3;
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'x';
    if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 2;
      io_micro_kernel_config->datatype_size = 8;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPD;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPD;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVDDUP;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVAPD;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVUPD;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULPD;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDPD;
    } else {
      io_micro_kernel_config->vector_length = 4;
      io_micro_kernel_config->datatype_size = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_SHUFPS;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVAPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVAPS;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPS;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULPS;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDPS;
    }
  } else if ( i_arch <= LIBXSMM_X86_AVX2 ) {
    /* AVX/AVX2: 256-bit ymm registers; AVX2 uses FMA for the multiply */
    io_micro_kernel_config->instruction_set = i_arch;
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'y';
    if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 4;
      io_micro_kernel_config->datatype_size = 8;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPD;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
      if ( i_arch == LIBXSMM_X86_AVX ) {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPD;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
      } else {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PD;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
      }
    } else {
      io_micro_kernel_config->vector_length = 8;
      io_micro_kernel_config->datatype_size = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
      if ( i_arch == LIBXSMM_X86_AVX ) {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPS;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
      } else {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
      }
    }
  } else if ( i_arch <= LIBXSMM_X86_ALLFEAT ) {
    /* AVX512: 512-bit zmm registers; masked C stores must not use
     * non-temporal moves, hence the i_use_masking_a_c checks below */
    io_micro_kernel_config->instruction_set = i_arch;
    io_micro_kernel_config->vector_reg_count = 32;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'z';
    if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 8;
      io_micro_kernel_config->datatype_size = 8;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
        if ( (i_use_masking_a_c == 0) ) {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPD;
        } else {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
        }
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PD;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
    } else if ( LIBXSMM_GEMM_PRECISION_F32 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 16;
      io_micro_kernel_config->datatype_size = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        if ( (i_use_masking_a_c == 0) ) {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
        } else {
          /* BUGFIX: previously assigned c_vmove_instruction here */
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        }
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
    } else if ( LIBXSMM_GEMM_PRECISION_I16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      /* C is 32bit, so we treat all 3 matrices as 32bit element arrays */
      io_micro_kernel_config->vector_length = 16;
      io_micro_kernel_config->datatype_size = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VPBROADCASTD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        if ( (i_use_masking_a_c == 0) ) {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
        } else {
          /* BUGFIX: previously assigned c_vmove_instruction here */
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        }
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VPDPWSSD;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VPADDD;
    } else if ( LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      /* C is 32bit, so we treat all 3 matrices as 32bit element arrays */
      io_micro_kernel_config->vector_length = 16;
      io_micro_kernel_config->datatype_size = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VPBROADCASTD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        if ( (i_use_masking_a_c == 0) ) {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
        } else {
          /* BUGFIX: previously assigned c_vmove_instruction here */
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        }
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VPDPBUSD;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VPADDD;
    } else if ( LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      /* C is 32bit, so we treat all 3 matrices as 32bit element arrays */
      io_micro_kernel_config->vector_length = 16;
      io_micro_kernel_config->datatype_size = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VPBROADCASTD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        if ( (i_use_masking_a_c == 0) ) {
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
        } else {
          /* BUGFIX: previously assigned c_vmove_instruction here */
          io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        }
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VPXORD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VDPBF16PS;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
    } else {
      /* shouldn't happen as we caught this case earlier */
      io_micro_kernel_config->instruction_set = LIBXSMM_X86_GENERIC;
      io_micro_kernel_config->vector_reg_count = 0;
      io_micro_kernel_config->use_masking_a_c = 0;
      io_micro_kernel_config->vector_name = 'a';
      io_micro_kernel_config->vector_length = 0;
      io_micro_kernel_config->datatype_size = 0;
      io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
    }
  } else {
    /* should not happen: all supported arch ranges are handled above */
  }

  /* scalar ALU instructions shared by all configurations */
  io_micro_kernel_config->prefetch_instruction = LIBXSMM_X86_INSTR_PREFETCHT1;
  io_micro_kernel_config->alu_add_instruction = LIBXSMM_X86_INSTR_ADDQ;
  io_micro_kernel_config->alu_sub_instruction = LIBXSMM_X86_INSTR_SUBQ;
  io_micro_kernel_config->alu_cmp_instruction = LIBXSMM_X86_INSTR_CMPQ;
  io_micro_kernel_config->alu_jmp_instruction = LIBXSMM_X86_INSTR_JL;
  io_micro_kernel_config->alu_mov_instruction = LIBXSMM_X86_INSTR_MOVQ;
}

/**
 * Half-vector-width variant of the configuration above (xmm on AVX/AVX2).
 * SSE requests are redirected to the scalar config and AVX512 requests to
 * the full-vector config (with debug warnings). The AVX/AVX2 path reports
 * LIBXSMM_X86_AVX as instruction_set while still selecting FMA for AVX2 —
 * presumably intentional since only xmm-width ops are emitted; TODO confirm.
 * A memset was added (consistency with the fullvector variant) so no field
 * stays uninitialized on any path.
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_init_micro_kernel_config_halfvector( libxsmm_micro_kernel_config* io_micro_kernel_config,
                                                                 const unsigned int             i_arch,
                                                                 const libxsmm_gemm_descriptor* i_xgemm_desc,
                                                                 const unsigned int             i_use_masking_a_c ) {
  memset(io_micro_kernel_config, 0, sizeof(*io_micro_kernel_config)); /* avoid warning "maybe used uninitialized" */
  if ( (i_arch < LIBXSMM_X86_SSE3) || (i_arch > LIBXSMM_X86_ALLFEAT) ) {
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_GENERIC;
    io_micro_kernel_config->vector_reg_count = 0;
    io_micro_kernel_config->use_masking_a_c = 0;
    io_micro_kernel_config->vector_name = 'a';
    io_micro_kernel_config->vector_length = 0;
    io_micro_kernel_config->datatype_size = 0;
    io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
  } else if ( i_arch <= LIBXSMM_X86_SSE4 ) {
#if !defined(NDEBUG)
    fprintf(stderr, "LIBXSMM WARNING, libxsmm_generator_gemm_init_micro_kernel_config_halfvector, redirecting to scalar, please fix the generation code!!!\n");
#endif
    libxsmm_generator_gemm_init_micro_kernel_config_scalar( io_micro_kernel_config, i_arch, i_xgemm_desc, i_use_masking_a_c );
  } else if ( i_arch <= LIBXSMM_X86_AVX2 ) {
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_AVX;
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'x';
    if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 2;
      io_micro_kernel_config->datatype_size = 8;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVDDUP;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPD;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPD;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
      if ( i_arch == LIBXSMM_X86_AVX ) {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPD;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPD;
      } else {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PD;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
      }
    } else {
      io_micro_kernel_config->vector_length = 4;
      io_micro_kernel_config->datatype_size = 4;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_A & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
      } else {
        io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VBROADCASTSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      if ( (LIBXSMM_GEMM_FLAG_ALIGN_C & i_xgemm_desc->flags) != 0 ) {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVAPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVNTPS;
      } else {
        io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
        io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVUPS;
      }
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
      if ( i_arch == LIBXSMM_X86_AVX ) {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULPS;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDPS;
      } else {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231PS;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
      }
    }
  } else if ( i_arch <= LIBXSMM_X86_ALLFEAT ) {
#if !defined(NDEBUG)
    fprintf(stderr, "LIBXSMM WARNING, libxsmm_generator_gemm_init_micro_kernel_config_halfvector, AVX512 redirecting to fullvector!\n");
#endif
    libxsmm_generator_gemm_init_micro_kernel_config_fullvector( io_micro_kernel_config, i_arch, i_xgemm_desc, i_use_masking_a_c );
  } else {
    /* should not happen */
  }

  io_micro_kernel_config->prefetch_instruction = LIBXSMM_X86_INSTR_PREFETCHT1;
  io_micro_kernel_config->alu_add_instruction = LIBXSMM_X86_INSTR_ADDQ;
  io_micro_kernel_config->alu_sub_instruction = LIBXSMM_X86_INSTR_SUBQ;
  io_micro_kernel_config->alu_cmp_instruction = LIBXSMM_X86_INSTR_CMPQ;
  io_micro_kernel_config->alu_jmp_instruction = LIBXSMM_X86_INSTR_JL;
  io_micro_kernel_config->alu_mov_instruction = LIBXSMM_X86_INSTR_MOVQ;
}

/**
 * Scalar (vector_length == 1) variant of the configuration: selects the
 * SS/SD scalar instructions for the target ISA. A memset was added
 * (consistency with the fullvector variant) so no field stays
 * uninitialized on any path.
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_init_micro_kernel_config_scalar( libxsmm_micro_kernel_config* io_micro_kernel_config,
                                                             const unsigned int             i_arch,
                                                             const libxsmm_gemm_descriptor* i_xgemm_desc,
                                                             const unsigned int             i_use_masking_a_c ) {
  memset(io_micro_kernel_config, 0, sizeof(*io_micro_kernel_config)); /* avoid warning "maybe used uninitialized" */
  if ( ( i_arch < LIBXSMM_X86_SSE3 ) || ( i_arch > LIBXSMM_X86_ALLFEAT ) ) {
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_GENERIC;
    io_micro_kernel_config->vector_reg_count = 0;
    io_micro_kernel_config->use_masking_a_c = 0;
    io_micro_kernel_config->vector_name = 'a';
    io_micro_kernel_config->vector_length = 0;
    io_micro_kernel_config->datatype_size = 0;
    io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_UNDEF;
    io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
  } else if ( i_arch <= LIBXSMM_X86_SSE4 ) {
    io_micro_kernel_config->instruction_set = LIBXSMM_X86_SSE3;
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'x';
    if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 1;
      io_micro_kernel_config->datatype_size = 8;
      io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVSD;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVSD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVSD;
      io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVSD;
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPD;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULSD;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDSD;
    } else {
      io_micro_kernel_config->vector_length = 1;
      io_micro_kernel_config->datatype_size = 4;
      io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_MOVSS;
      io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_MOVSS;
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_XORPS;
      io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_MULSS;
      io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_ADDSS;
    }
  } else if ( i_arch <= LIBXSMM_X86_ALLFEAT ) {
    io_micro_kernel_config->instruction_set = i_arch;
    io_micro_kernel_config->vector_reg_count = 16;
    io_micro_kernel_config->use_masking_a_c = i_use_masking_a_c;
    io_micro_kernel_config->vector_name = 'x';
    if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
      io_micro_kernel_config->vector_length = 1;
      io_micro_kernel_config->datatype_size = 8;
      io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSD;
      io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVSD;
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPD;
      if ( i_arch == LIBXSMM_X86_AVX ) {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULSD;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDSD;
      } else {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231SD;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
      }
    } else {
      io_micro_kernel_config->vector_length = 1;
      io_micro_kernel_config->datatype_size = 4;
      io_micro_kernel_config->a_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
      io_micro_kernel_config->b_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
      io_micro_kernel_config->b_shuff_instruction = LIBXSMM_X86_INSTR_UNDEF;
      io_micro_kernel_config->c_vmove_instruction = LIBXSMM_X86_INSTR_VMOVSS;
      io_micro_kernel_config->c_vmove_nts_instruction = LIBXSMM_X86_INSTR_VMOVSS;
      io_micro_kernel_config->vxor_instruction = LIBXSMM_X86_INSTR_VXORPS;
      if ( i_arch == LIBXSMM_X86_AVX ) {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VMULSS;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_VADDSS;
      } else {
        io_micro_kernel_config->vmul_instruction = LIBXSMM_X86_INSTR_VFMADD231SS;
        io_micro_kernel_config->vadd_instruction = LIBXSMM_X86_INSTR_UNDEF;
      }
    }
  } else {
    /* should not happen */
  }

  io_micro_kernel_config->prefetch_instruction = LIBXSMM_X86_INSTR_PREFETCHT1;
  io_micro_kernel_config->alu_add_instruction = LIBXSMM_X86_INSTR_ADDQ;
  io_micro_kernel_config->alu_sub_instruction = LIBXSMM_X86_INSTR_SUBQ;
  io_micro_kernel_config->alu_cmp_instruction = LIBXSMM_X86_INSTR_CMPQ;
  io_micro_kernel_config->alu_jmp_instruction = LIBXSMM_X86_INSTR_JL;
  io_micro_kernel_config->alu_mov_instruction = LIBXSMM_X86_INSTR_MOVQ;
}

/**
 * For source-code output (code_type == 0) only: appends C code to the
 * generated kernel that atomically accumulates 2*m*n*k into the global
 * libxsmm_num_total_flops counter, guarded by #ifndef NDEBUG.
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_add_flop_counter( libxsmm_generated_code*        io_generated_code,
                                              const libxsmm_gemm_descriptor* i_xgemm_desc ) {
  if ( io_generated_code->code_type == 0 ) {
    char l_new_code[512];
    const unsigned int l_max_code_length = sizeof(l_new_code) - 1;
    int l_code_length = 0;

    l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "#ifndef NDEBUG\n" );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "#ifdef _OPENMP\n" );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "#pragma omp atomic\n" );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "#endif\n" );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "libxsmm_num_total_flops += %u;\n", 2u * i_xgemm_desc->m * i_xgemm_desc->n * i_xgemm_desc->k);
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
    l_code_length = LIBXSMM_SNPRINTF( l_new_code, l_max_code_length, "#endif\n" );
    libxsmm_append_code_as_string( io_generated_code, l_new_code, l_code_length );
  }
}

/**
 * Emits the k-loop header: zeroes the k-loop counter register, places the
 * back-jump label, and pre-increments the counter by the k blocking factor
 * (the matching footer compares and jumps back).
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_header_kloop( libxsmm_generated_code*            io_generated_code,
                                          libxsmm_loop_label_tracker*        io_loop_label_tracker,
                                          const libxsmm_gp_reg_mapping*      i_gp_reg_mapping,
                                          const libxsmm_micro_kernel_config* i_micro_kernel_config,
                                          const unsigned int                 i_m_blocking,
                                          const unsigned int                 i_k_blocking ) {
  LIBXSMM_UNUSED(i_m_blocking);
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_kloop, 0);
  libxsmm_x86_instruction_register_jump_back_label( io_generated_code, io_loop_label_tracker );
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_kloop, i_k_blocking);
}

/**
 * Emits the k-loop footer: compares the k-loop counter against the blocked
 * trip count and jumps back while below. When the k loop is fully unrolled
 * past (i_kloop_complete != 0), rewinds the B pointer by one full pass
 * (ldb*k elements when B is transposed, k elements otherwise).
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_footer_kloop( libxsmm_generated_code*            io_generated_code,
                                          libxsmm_loop_label_tracker*        io_loop_label_tracker,
                                          const libxsmm_gp_reg_mapping*      i_gp_reg_mapping,
                                          const libxsmm_micro_kernel_config* i_micro_kernel_config,
                                          const libxsmm_gemm_descriptor*     i_xgemm_desc,
                                          const unsigned int                 i_m_blocking,
                                          const unsigned int                 i_max_blocked_k,
                                          const unsigned int                 i_kloop_complete ) {
  LIBXSMM_UNUSED(i_m_blocking);
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_kloop, i_max_blocked_k );
  libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
  if ( i_kloop_complete != 0 ) {
    int l_b_offset = 0;
    if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) > 0 ) {
      l_b_offset = i_xgemm_desc->ldb * i_xgemm_desc->k * i_micro_kernel_config->datatype_size;
    } else {
      l_b_offset = i_xgemm_desc->k * i_micro_kernel_config->datatype_size;
    }
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_b, l_b_offset );
  }
}

/**
 * Emits the reduce-loop header: zeroes the reduce-loop counter and places
 * the back-jump label.
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_header_reduceloop( libxsmm_generated_code*            io_generated_code,
                                               libxsmm_loop_label_tracker*        io_loop_label_tracker,
                                               const libxsmm_gp_reg_mapping*      i_gp_reg_mapping,
                                               const libxsmm_micro_kernel_config* i_micro_kernel_config ) {
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_reduce_loop, 0);
  libxsmm_x86_instruction_register_jump_back_label( io_generated_code, io_loop_label_tracker );
}

/**
 * Emits the reduce-loop footer: increments the counter, compares it against
 * the reduce count register and jumps back while below.
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_footer_reduceloop( libxsmm_generated_code*            io_generated_code,
                                               libxsmm_loop_label_tracker*        io_loop_label_tracker,
                                               const libxsmm_gp_reg_mapping*      i_gp_reg_mapping,
                                               const libxsmm_micro_kernel_config* i_micro_kernel_config,
                                               const libxsmm_gemm_descriptor*     i_xgemm_desc) {
  LIBXSMM_UNUSED(i_xgemm_desc);
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_reduce_loop, 1);
  libxsmm_x86_instruction_alu_reg( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_reduce_count, i_gp_reg_mapping->gp_reg_reduce_loop);
  libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
}

/**
 * Emits the n-loop header: places the back-jump label, advances the n-loop
 * counter by the n blocking factor and zeroes the m-loop counter.
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_header_nloop( libxsmm_generated_code*            io_generated_code,
                                          libxsmm_loop_label_tracker*        io_loop_label_tracker,
                                          const libxsmm_gp_reg_mapping*      i_gp_reg_mapping,
                                          const libxsmm_micro_kernel_config* i_micro_kernel_config,
                                          const unsigned int                 i_n_blocking) {
  libxsmm_x86_instruction_register_jump_back_label( io_generated_code, io_loop_label_tracker );
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_nloop, i_n_blocking );
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_mloop, 0 );
}

/* Emits the n-loop footer: advances the C pointer past the n block, scaling
 * by the stored element size (datatype_size/2 for BF16 output,
 * datatype_size/4 for I8 output, since those store C narrower than the
 * 32-bit accumulation size recorded in the config).
 * NOTE: this function continues beyond the end of this chunk. */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_footer_nloop( libxsmm_generated_code*            io_generated_code,
                                          libxsmm_loop_label_tracker*        io_loop_label_tracker,
                                          const libxsmm_gp_reg_mapping*      i_gp_reg_mapping,
                                          const libxsmm_micro_kernel_config* i_micro_kernel_config,
                                          const libxsmm_gemm_descriptor*     i_xgemm_desc,
                                          const unsigned int                 i_n_blocking,
                                          const unsigned int                 i_n_done ) {
  if ( LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) {
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c, (i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size/2)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size/2)) );
  } else if ( LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) {
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c, (i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size/4)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size/4)) );
  } else {
    libxsmm_x86_instruction_alu_imm( io_generated_code,
i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c, (i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) ); } /* B prefetch */ if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C || i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C || i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD ) { if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) == 0 ) { unsigned int l_type_scaling; if ( (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype )) || (LIBXSMM_GEMM_PRECISION_I16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype )) ) { l_type_scaling = 2; } else if ( LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) { l_type_scaling = 4; } else { l_type_scaling = 1; } libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_b_prefetch, (i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size/l_type_scaling)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size/l_type_scaling)) ); } } #if 0 if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 || i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) { libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c_prefetch, (i_n_blocking*(i_xgemm_desc->ldc)*(i_micro_kernel_config->datatype_size)) - ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) ); } #endif if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) { /* handle trans B */ int l_b_offset = 0; if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) > 0 ) { l_b_offset = i_n_blocking * i_micro_kernel_config->datatype_size; } else { l_b_offset = i_n_blocking * i_xgemm_desc->ldb * i_micro_kernel_config->datatype_size; } libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 ); 
libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop ); libxsmm_generator_gemm_header_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config ); libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_a, i_gp_reg_mapping->gp_reg_reduce_loop, 8, 0, i_gp_reg_mapping->gp_reg_help_0, 0 ); libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_help_0, ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) ); libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_a, i_gp_reg_mapping->gp_reg_reduce_loop, 8, 0, i_gp_reg_mapping->gp_reg_help_0, 1 ); libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_b, i_gp_reg_mapping->gp_reg_reduce_loop, 8, 0, i_gp_reg_mapping->gp_reg_help_0, 0 ); libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_help_0, l_b_offset ); libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_b, i_gp_reg_mapping->gp_reg_reduce_loop, 8, 0, i_gp_reg_mapping->gp_reg_help_0, 1 ); if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 || i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ) { libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_a_prefetch, i_gp_reg_mapping->gp_reg_reduce_loop, 8, 0, i_gp_reg_mapping->gp_reg_help_0, 0 ); libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_help_0, ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) ); libxsmm_x86_instruction_alu_mem( io_generated_code, 
i_micro_kernel_config->alu_mov_instruction, i_gp_reg_mapping->gp_reg_a_prefetch, i_gp_reg_mapping->gp_reg_reduce_loop, 8, 0, i_gp_reg_mapping->gp_reg_help_0, 1 ); } libxsmm_generator_gemm_footer_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config, i_xgemm_desc); libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop ); libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 ); } else { /* handle trans B */ int l_b_offset = 0; if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) > 0 ) { l_b_offset = i_n_blocking * i_micro_kernel_config->datatype_size; } else { l_b_offset = i_n_blocking * i_xgemm_desc->ldb * i_micro_kernel_config->datatype_size; } libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_b, l_b_offset ); libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_a, ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) ); if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 || i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ) { libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_a_prefetch, ((i_xgemm_desc->m)*(i_micro_kernel_config->datatype_size)) ); } } libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_nloop, i_n_done ); libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker ); } LIBXSMM_API_INTERN void libxsmm_generator_gemm_header_mloop( libxsmm_generated_code* io_generated_code, libxsmm_loop_label_tracker* io_loop_label_tracker, const libxsmm_gp_reg_mapping* i_gp_reg_mapping, const libxsmm_micro_kernel_config* i_micro_kernel_config, const unsigned int i_m_blocking ) { 
  libxsmm_x86_instruction_register_jump_back_label( io_generated_code, io_loop_label_tracker );
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_mloop, i_m_blocking );
}

/**
 * Emits the M-loop footer. Per completed M block it:
 *  - advances C by i_m_blocking elements (datatype_size scaled /2 for BF16
 *    and /4 for int8 output, since datatype_size is the accumulator width);
 *  - advances the B-prefetch pointer for the BL2-style schemes (input-type
 *    scaled the same way);
 *  - rewinds A by k*lda minus the i_m_blocking just consumed, so A points at
 *    the next M block's first column — directly, or per pointer-array entry
 *    via an emitted reduce loop under LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS;
 *  - compares the m-loop counter with i_m_done and jumps back while not done.
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_footer_mloop( libxsmm_generated_code*            io_generated_code,
                                          libxsmm_loop_label_tracker*        io_loop_label_tracker,
                                          const libxsmm_gp_reg_mapping*      i_gp_reg_mapping,
                                          const libxsmm_micro_kernel_config* i_micro_kernel_config,
                                          const libxsmm_gemm_descriptor*     i_xgemm_desc,
                                          const unsigned int                 i_m_blocking,
                                          const unsigned int                 i_m_done ) {
  /* advance C pointer */
  if ( LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) {
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c,
                                     i_m_blocking*(i_micro_kernel_config->datatype_size/2) );
  } else if ( LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) {
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c,
                                     i_m_blocking*(i_micro_kernel_config->datatype_size/4) );
  } else {
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c,
                                     i_m_blocking*(i_micro_kernel_config->datatype_size) );
  }
  /* C prefetch */
#if 0
  if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 ||
       i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) {
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_c_prefetch,
                                     i_m_blocking*(i_micro_kernel_config->datatype_size) );
  }
#endif
  /* B prefetch */
  if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C ||
       i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ||
       i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD ) {
    if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) == 0 ) {
      /* the prefetch pointer walks C-sized elements; scale down for narrow input types */
      unsigned int l_type_scaling;
      if ( (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype )) ||
           (LIBXSMM_GEMM_PRECISION_I16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype )) ) {
        l_type_scaling = 2;
      } else if ( LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
        l_type_scaling = 4;
      } else {
        l_type_scaling = 1;
      }
      libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_add_instruction, i_gp_reg_mapping->gp_reg_b_prefetch,
                                       i_m_blocking*(i_micro_kernel_config->datatype_size/l_type_scaling) );
    }
  }
  /* A prefetch */
  if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 ||
       i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C) {
    if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) {
      /* NOTE(review): only the AL2 case updates the prefetch pointer array here;
         for AL2BL2_VIA_C under batch-reduce nothing is emitted — confirm intended. */
      if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2 ) {
        libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
        libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
        libxsmm_generator_gemm_header_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config );
        /* a_prefetch[i] -= (k*lda - i_m_blocking) elements */
        libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction,
                                         i_gp_reg_mapping->gp_reg_a_prefetch, i_gp_reg_mapping->gp_reg_reduce_loop, 8, 0, i_gp_reg_mapping->gp_reg_help_0, 0 );
        libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_help_0,
                                         ((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size) * (i_xgemm_desc->lda) ) - (i_m_blocking * (i_micro_kernel_config->datatype_size)) );
        libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction,
                                         i_gp_reg_mapping->gp_reg_a_prefetch, i_gp_reg_mapping->gp_reg_reduce_loop, 8, 0, i_gp_reg_mapping->gp_reg_help_0, 1 );
        libxsmm_generator_gemm_footer_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config, i_xgemm_desc);
        libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
        libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
      }
    } else {
      libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_a_prefetch,
                                       ((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size) * (i_xgemm_desc->lda) ) - (i_m_blocking * (i_micro_kernel_config->datatype_size)) );
    }
  }
  /* advance A pointer */
  if (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_ADDRESS) {
    /* batch-reduce: update every A pointer-array entry via an emitted loop */
    libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
    libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
    libxsmm_generator_gemm_header_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config );
    libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction,
                                     i_gp_reg_mapping->gp_reg_a, i_gp_reg_mapping->gp_reg_reduce_loop, 8, 0, i_gp_reg_mapping->gp_reg_help_0, 0 );
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction, i_gp_reg_mapping->gp_reg_help_0,
                                     ((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size) * (i_xgemm_desc->lda) ) - (i_m_blocking * (i_micro_kernel_config->datatype_size)) );
    libxsmm_x86_instruction_alu_mem( io_generated_code, i_micro_kernel_config->alu_mov_instruction,
                                     i_gp_reg_mapping->gp_reg_a, i_gp_reg_mapping->gp_reg_reduce_loop, 8, 0, i_gp_reg_mapping->gp_reg_help_0, 1 );
    libxsmm_generator_gemm_footer_reduceloop( io_generated_code, io_loop_label_tracker, i_gp_reg_mapping, i_micro_kernel_config, i_xgemm_desc);
    libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_reduce_loop );
    libxsmm_x86_instruction_pop_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_0 );
  } else {
    libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_sub_instruction,
                                     i_gp_reg_mapping->gp_reg_a,
                                     ((i_xgemm_desc->k) * (i_micro_kernel_config->datatype_size) * (i_xgemm_desc->lda) ) - (i_m_blocking * (i_micro_kernel_config->datatype_size)) );
  }
  /* loop handling */
  libxsmm_x86_instruction_alu_imm( io_generated_code, i_micro_kernel_config->alu_cmp_instruction, i_gp_reg_mapping->gp_reg_mloop, i_m_done );
  libxsmm_x86_instruction_jump_back_to_label( io_generated_code, i_micro_kernel_config->alu_jmp_instruction, io_loop_label_tracker );
}

/**
 * Emits the loads (or zero-initialization, for beta=0) of the C accumulator
 * register block: i_n_blocking columns times ceil(i_m_blocking/vector_length)
 * vector registers, placed at the top of the vector register file.
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_load_C( libxsmm_generated_code*            io_generated_code,
                                    const libxsmm_gp_reg_mapping*      i_gp_reg_mapping,
                                    const libxsmm_micro_kernel_config* i_micro_kernel_config,
                                    const libxsmm_gemm_descriptor*     i_xgemm_desc,
                                    const unsigned int                 i_m_blocking,
                                    const unsigned int                 i_n_blocking ) {
  unsigned int l_m_blocking, l_vec_reg_acc_start;
  /* register blocking counter in n */
  unsigned int l_n = 0;
  /* register blocking counter in m */
  unsigned int l_m = 0;
  assert(0 < i_micro_kernel_config->vector_length);
  /* deriving register blocking from kernel config: vector registers per column, rounded up */
  l_m_blocking = ( i_m_blocking % i_micro_kernel_config->vector_length == 0 ) ? i_m_blocking/i_micro_kernel_config->vector_length : (i_m_blocking/i_micro_kernel_config->vector_length)+1;
  /* start register of accumulator: the block occupies the top of the register file */
  l_vec_reg_acc_start = i_micro_kernel_config->vector_reg_count - (i_n_blocking * l_m_blocking);
#if !defined(NDEBUG)
  /* Do some test if it is possible to generate the requested code.
     This is not done in release mode and therefore bad things might happen....
HUAAH */
  /* debug-only sanity checks on the requested register blocking per ISA tier */
  if (i_micro_kernel_config->instruction_set == LIBXSMM_X86_SSE3 ||
      i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX  ||
      i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX2 ) {
    if ( (i_n_blocking > 3) || (i_n_blocking < 1) || (i_m_blocking < 1) ) {
      LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
      return;
    }
  } else if ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512_CORE ) {
    if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (l_m_blocking != 1) ) {
      LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
      return;
    }
  } else if ( i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CORE ) {
    if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (l_m_blocking < 1) || (l_m_blocking > 6) ) {
      LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK );
      return;
    }
  } else {}
#if 0
  if ( i_m_blocking % i_micro_kernel_config->vector_length != 0 ) {
    LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_M_BLOCK );
    return;
  }
#endif
#endif /*!defined(NDEBUG)*/
  /* load C accumulator */
  if (0 == (LIBXSMM_GEMM_FLAG_BETA_0 & i_xgemm_desc->flags)) { /* Beta=1 */
    /* pure BF16 kernel */
    if ( ( (i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CORE) &&
           (i_micro_kernel_config->instruction_set <= LIBXSMM_X86_ALLFEAT)        ) &&
         ( (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) &&
           (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) )    ) ) {
      /* we add when scaling during conversion to FP32 */
      for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
        for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
          /* load 16 bit values into ymm portion of the register */
          if ( (i_micro_kernel_config->use_masking_a_c != 0) && ( l_m == (l_m_blocking - 1) ) ) {
            /* last m-block: masked VMOVDQU16 load (mask register 2) */
            libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VMOVDQU16,
                                              i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0,
                                              ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2),
                                              'z', 0, 2, 1, 0 );
          } else {
            libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, i_micro_kernel_config->c_vmove_instruction,
                                              i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0,
                                              ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2),
                                              'y', 0, 0, 1, 0 );
          }
          /* convert 16 bit values into 32 bit (integer convert) */
          libxsmm_x86_instruction_vec_compute_convert( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VPMOVSXWD,
                                                       i_micro_kernel_config->vector_name, 0, LIBXSMM_X86_VEC_REG_UNDEF,
                                                       l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), LIBXSMM_X86_VEC_REG_UNDEF);
          /* shift 16 bits to the left to generate valid FP32 numbers */
          libxsmm_x86_instruction_vec_shuffle_reg(io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VPSLLD,
                                                  i_micro_kernel_config->vector_name,
                                                  l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
                                                  l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
                                                  LIBXSMM_X86_VEC_REG_UNDEF, 16);
        }
      }
    /* pure int8 kernel */
    } else if ( ( (i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CORE) &&
                  (i_micro_kernel_config->instruction_set <= LIBXSMM_X86_ALLFEAT)        ) &&
                ( (LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) &&
                  (LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) )    ) ) {
      /* we need to up convert int8 to int32 */
      for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
        for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
          /* load 16 bit values into xmm portion of the register */
          if ( (i_micro_kernel_config->use_masking_a_c != 0) && ( l_m == (l_m_blocking - 1) ) ) {
            /* last m-block: masked VMOVDQU8 load (mask register 2) */
            libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VMOVDQU8,
                                              i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0,
                                              ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/4),
                                              'z', 0, 2, 1, 0 );
          } else {
            libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, i_micro_kernel_config->c_vmove_instruction,
                                              i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0,
                                              ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/4),
                                              'x', 0, 0, 1, 0 );
          }
          /* convert 8 bit values into 32 bit (integer convert); zero- vs. sign-extend per C_UNSIGNED flag */
          if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_C_UNSIGNED) != 0 ) {
            libxsmm_x86_instruction_vec_compute_convert( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VPMOVZXBD,
                                                         i_micro_kernel_config->vector_name, 0, LIBXSMM_X86_VEC_REG_UNDEF,
                                                         l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), LIBXSMM_X86_VEC_REG_UNDEF);
          } else {
            libxsmm_x86_instruction_vec_compute_convert( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VPMOVSXBD,
                                                         i_micro_kernel_config->vector_name, 0, LIBXSMM_X86_VEC_REG_UNDEF,
                                                         l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), LIBXSMM_X86_VEC_REG_UNDEF);
          }
        }
      }
    } else {
      /* adding to C, so let's load C */
      for ( l_n = 0; l_n < i_n_blocking; l_n++ ) {
        for ( l_m = 0; l_m < l_m_blocking; l_m++ ) {
          /* we only mask the last m-blocked load */
          libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, i_micro_kernel_config->c_vmove_instruction,
                                            i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0,
                                            ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size),
                                            i_micro_kernel_config->vector_name,
                                            l_vec_reg_acc_start + l_m + (l_m_blocking * l_n),
                                            ( l_m == (l_m_blocking - 1) ) ?
i_micro_kernel_config->use_masking_a_c : 0, 1, 0 ); } #if 0 if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 || i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) { for (l_m = 0; l_m < l_m_blocking; l_m += l_m++ ) { libxsmm_x86_instruction_prefetch( io_generated_code, i_micro_kernel_config->prefetch_instruction, i_gp_reg_mapping->gp_reg_c_prefetch, LIBXSMM_X86_GP_REG_UNDEF, 0, ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size)); } } #endif } } } else { /* overwriting C, so let's xout accumulator */ for ( l_n = 0; l_n < i_n_blocking; l_n++ ) { for ( l_m = 0; l_m < l_m_blocking; l_m++ ) { libxsmm_x86_instruction_vec_compute_reg( io_generated_code, i_micro_kernel_config->instruction_set, i_micro_kernel_config->vxor_instruction, i_micro_kernel_config->vector_name, l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), l_vec_reg_acc_start + l_m + (l_m_blocking * l_n) ); } #if 0 if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_CL2 || i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2CL2BL2_VIA_C ) { for (l_m = 0; l_m < l_m_blocking; l_m += l_m++ ) { libxsmm_x86_instruction_prefetch( io_generated_code, i_micro_kernel_config->prefetch_instruction, i_gp_reg_mapping->gp_reg_c_prefetch, LIBXSMM_X86_GP_REG_UNDEF, 0, ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size)); } } #endif } } } LIBXSMM_API_INTERN void libxsmm_generator_gemm_store_C( libxsmm_generated_code* io_generated_code, const libxsmm_gp_reg_mapping* i_gp_reg_mapping, const libxsmm_micro_kernel_config* i_micro_kernel_config, const libxsmm_gemm_descriptor* i_xgemm_desc, const unsigned int i_m_blocking, const unsigned int i_n_blocking ) { /* deriving register blocking from kernel config */ unsigned int l_m_blocking = ( i_m_blocking % i_micro_kernel_config->vector_length == 0 ) ? 
i_m_blocking/i_micro_kernel_config->vector_length : (i_m_blocking/i_micro_kernel_config->vector_length)+1; /* register blocking counter in n */ unsigned int l_n = 0; /* register blocking counter in m */ unsigned int l_m = 0; /* start register of accumulator */ unsigned int l_vec_reg_acc_start = i_micro_kernel_config->vector_reg_count - (i_n_blocking * l_m_blocking); /* select store instruction */ unsigned int l_vstore = (LIBXSMM_GEMM_FLAG_ALIGN_C_NTS_HINT == (LIBXSMM_GEMM_FLAG_ALIGN_C_NTS_HINT & i_xgemm_desc->flags)) ? i_micro_kernel_config->c_vmove_nts_instruction : i_micro_kernel_config->c_vmove_instruction; /* @TODO fix this test */ #if !defined(NDEBUG) if (i_micro_kernel_config->instruction_set == LIBXSMM_X86_SSE3 || i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX || i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX2 ) { if ( (i_n_blocking > 3) || (i_n_blocking < 1) || (i_m_blocking < 1) ) { LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK ); return; } } else if ( i_micro_kernel_config->instruction_set < LIBXSMM_X86_AVX512_CORE ) { if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (i_m_blocking != i_micro_kernel_config->vector_length) ) { LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK ); return; } } else if ( i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CORE ) { if ( (i_n_blocking > 30) || (i_n_blocking < 1) || (l_m_blocking < 1) || (l_m_blocking > 6) ) { LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_REG_BLOCK ); return; } } else {} #if 0 if ( i_m_blocking % i_micro_kernel_config->vector_length != 0 ) { LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_M_BLOCK ); return; } #endif #endif if ( ( (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CORE) || (i_micro_kernel_config->instruction_set == LIBXSMM_X86_AVX512_CLX) ) && ( (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( 
i_xgemm_desc->datatype ) ) ) ) { #if 0 /* push 0x7f800000 on the stack, naninf masking */ libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, i_gp_reg_mapping->gp_reg_help_5, 0x7f800000); libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_5 ); /* push 0x00010000 on the stack, fixup masking */ libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, i_gp_reg_mapping->gp_reg_help_5, 0x00010000); libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_5 ); /* push 0x00007fff on the stack, rneadd */ libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, i_gp_reg_mapping->gp_reg_help_5, 0x00007fff); libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_5 ); /* push 0x00000001 on the stack, fixup */ libxsmm_x86_instruction_alu_imm( io_generated_code, LIBXSMM_X86_INSTR_MOVQ, i_gp_reg_mapping->gp_reg_help_5, 0x00000001); libxsmm_x86_instruction_push_reg( io_generated_code, i_gp_reg_mapping->gp_reg_help_5 ); #endif /* storing downconverted and rounded C accumulator */ for ( l_n = 0; l_n < i_n_blocking; l_n++ ) { for ( l_m = 0; l_m < l_m_blocking; l_m++ ) { unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n); libxsmm_x86_instruction_vec_shuffle_reg(io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VPSRAD, i_micro_kernel_config->vector_name, reg_X, reg_X, LIBXSMM_X86_VEC_REG_UNDEF, 16); libxsmm_x86_instruction_vec_compute_convert( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VPMOVDW, i_micro_kernel_config->vector_name, reg_X, LIBXSMM_X86_VEC_REG_UNDEF, 0, LIBXSMM_X86_VEC_REG_UNDEF); /* store 16 bit values into ymm portion of the register */ if ( (i_micro_kernel_config->use_masking_a_c != 0) && ( l_m == (l_m_blocking - 1) ) ) { libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VMOVDQU16, 
i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0, ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2), 'z', 0, 2, 0, 1 ); } else { libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, l_vstore, i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0, ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2), 'y', 0, 0, 0, 1 ); } } } } else if ( ( (i_micro_kernel_config->instruction_set <= LIBXSMM_X86_ALLFEAT) || (i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CPX) ) && ( (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) { /* storing downconverted and rounded C accumulator */ for ( l_n = 0; l_n < i_n_blocking; l_n++ ) { unsigned int l_m_2_blocking = (l_m_blocking/2)*2; l_m = 0; if ( i_micro_kernel_config->use_masking_a_c != 0 ) { for ( l_m = 0 ; l_m < l_m_blocking; l_m++ ) { unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n); libxsmm_x86_instruction_vec_compute_convert( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VCVTNEPS2BF16, i_micro_kernel_config->vector_name, reg_X, LIBXSMM_X86_VEC_REG_UNDEF, 0, 0); /* store 16 bit values into ymm portion of the register */ if ( l_m == (l_m_blocking - 1) ) { libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VMOVDQU16, i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0, ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2), 'z', 0, 2, 0, 1 ); } else { libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, l_vstore, i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0, ((l_n * i_xgemm_desc->ldc) + (l_m * 
(i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2), 'y', 0, 0, 0, 1 ); } } } else { for (; l_m < l_m_2_blocking; l_m+=2 ) { unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n); unsigned int reg_X2 = l_vec_reg_acc_start + l_m+1 + (l_m_blocking * l_n); libxsmm_x86_instruction_vec_compute_convert( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VCVTNE2PS2BF16, i_micro_kernel_config->vector_name, reg_X, reg_X2, 0, 0); libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, l_vstore, i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0, ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2), 'z', 0, 0, 0, 1 ); } for (; l_m < l_m_blocking; l_m++ ) { unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n); libxsmm_x86_instruction_vec_compute_convert( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VCVTNEPS2BF16, i_micro_kernel_config->vector_name, reg_X, LIBXSMM_X86_VEC_REG_UNDEF, 0, 0); libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, l_vstore, i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0, ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/2), 'y', 0, 0, 0, 1 ); } } } } else if ( ( (i_micro_kernel_config->instruction_set <= LIBXSMM_X86_ALLFEAT) || (i_micro_kernel_config->instruction_set >= LIBXSMM_X86_AVX512_CORE) ) && ( (LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) && (LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) ) { /* pick the right instrucitons */ unsigned int inst_f32_i32 = ( ( i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_C_UNSIGNED ) != 0 ) ? 
LIBXSMM_X86_INSTR_VCVTPS2UDQ : LIBXSMM_X86_INSTR_VCVTPS2DQ; unsigned int inst_i32_i8 = ( ( i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_C_UNSIGNED ) != 0 ) ? LIBXSMM_X86_INSTR_VPMOVUSDB : LIBXSMM_X86_INSTR_VPMOVSDB; /* there are case where we need to load the scaling factor's address from the stack argument list */ if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_BATCH_REDUCE_OFFSET) != 0 ) { libxsmm_x86_instruction_load_arg_to_reg( io_generated_code, 0, i_gp_reg_mapping->gp_reg_scf ); } /* loading scf into register 3 */ libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VBROADCASTSS, i_gp_reg_mapping->gp_reg_scf, LIBXSMM_X86_GP_REG_UNDEF, 0, 0, i_micro_kernel_config->vector_name, 3, 0, 1, 0 ); /* Zero out register 0 to perform relu */ libxsmm_x86_instruction_vec_compute_reg( io_generated_code, i_micro_kernel_config->instruction_set, i_micro_kernel_config->vxor_instruction, i_micro_kernel_config->vector_name, 0, 0, 0); /* storing downconverted and rounded C accumulator */ for ( l_n = 0; l_n < i_n_blocking; l_n++ ) { for ( l_m = 0; l_m < l_m_blocking; l_m++ ) { unsigned int reg_X = l_vec_reg_acc_start + l_m + (l_m_blocking * l_n); /* Convert result to F32 */ libxsmm_x86_instruction_vec_compute_reg( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VCVTDQ2PS, i_micro_kernel_config->vector_name, reg_X, reg_X, LIBXSMM_X86_VEC_REG_UNDEF); /* Multiply with scaling factor */ libxsmm_x86_instruction_vec_compute_reg( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VMULPS, i_micro_kernel_config->vector_name, reg_X, 3, reg_X ); /* Perform RELU */ libxsmm_x86_instruction_vec_compute_reg( io_generated_code, i_micro_kernel_config->instruction_set, LIBXSMM_X86_INSTR_VMAXPS, i_micro_kernel_config->vector_name, reg_X, 0, reg_X); /* Round result to int32 */ libxsmm_x86_instruction_vec_compute_convert( io_generated_code, i_micro_kernel_config->instruction_set, inst_f32_i32, 
i_micro_kernel_config->vector_name, reg_X, LIBXSMM_X86_VEC_REG_UNDEF, reg_X, 0); /* down-convert to int8 */ libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, inst_i32_i8, i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0, ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size/4), i_micro_kernel_config->vector_name, reg_X, ( ( l_m == (l_m_blocking - 1)) && ( i_micro_kernel_config->use_masking_a_c != 0 ) ) ? 2 : 0, 0, 1 ); } } } else { /* storing C accumulator */ for ( l_n = 0; l_n < i_n_blocking; l_n++ ) { for ( l_m = 0; l_m < l_m_blocking; l_m++ ) { libxsmm_x86_instruction_vec_move( io_generated_code, i_micro_kernel_config->instruction_set, l_vstore, i_gp_reg_mapping->gp_reg_c, LIBXSMM_X86_GP_REG_UNDEF, 0, ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size), i_micro_kernel_config->vector_name, l_vec_reg_acc_start + l_m + (l_m_blocking * l_n), ( l_m == (l_m_blocking - 1) ) ? 
i_micro_kernel_config->use_masking_a_c : 0, 0, 1 );
      }
      /* optionally emit software prefetches through the C/B-prefetch pointer
       * (only when the descriptor selected one of the *_VIA_C prefetch strategies
       * and B is not transposed) */
      if ( i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_BL2_VIA_C ||
           i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C ||
           i_xgemm_desc->prefetch == LIBXSMM_GEMM_PREFETCH_AL2BL2_VIA_C_AHEAD ) {
        if ( (i_xgemm_desc->flags & LIBXSMM_GEMM_FLAG_TRANS_B) == 0 ) {
          /* determining how many prefetches we need in M direction as we just need one prefetch per cache line */
          unsigned int l_m_advance = 64 / ((i_micro_kernel_config->vector_length) * (i_micro_kernel_config->datatype_size)); /* 64: hardcoded cache line length */
          for (l_m = 0; l_m < l_m_blocking; l_m += l_m_advance ) {
            libxsmm_x86_instruction_prefetch( io_generated_code,
                                              i_micro_kernel_config->prefetch_instruction,
                                              i_gp_reg_mapping->gp_reg_b_prefetch,
                                              LIBXSMM_X86_GP_REG_UNDEF, 0,
                                              ((l_n * i_xgemm_desc->ldc) + (l_m * (i_micro_kernel_config->vector_length))) * (i_micro_kernel_config->datatype_size));
          }
        }
      }
    }
  }
}

/**
 * Emits the instructions that initialize the AVX-512 opmask register used for
 * remainder (masked) loads/stores of C.
 *
 * The full mask is 0xff for FP64 (8 lanes) and 0xffff otherwise (16 lanes);
 * it is shifted right by i_mask_count (the "inverse" remainder, i.e. the
 * number of lanes to disable) and moved into LIBXSMM_X86_AVX512_MASK via
 * KMOVW through the scratch GP register i_gp_reg_tmp.  For BF16 and I8
 * kernels a second, wider mask is loaded into opmask register 2 (KMOVD for
 * 16-bit elements, KMOVQ for 8-bit elements) so that the down-converted
 * stores can be masked as well.
 *
 * @param io_generated_code  code stream the instructions are appended to
 * @param i_gp_reg_tmp       scratch GP register used to stage the immediate mask
 * @param i_xgemm_desc       GEMM descriptor (datatype decides mask width)
 * @param i_mask_count       number of lanes to mask OFF ("inverse" remainder)
 */
LIBXSMM_API_INTERN
void libxsmm_generator_gemm_initialize_avx512_mask( libxsmm_generated_code*        io_generated_code,
                                                    const unsigned int             i_gp_reg_tmp,
                                                    const libxsmm_gemm_descriptor* i_xgemm_desc,
                                                    const unsigned int             i_mask_count ) {
  unsigned int l_mask;

  /* init full mask */
  if ( LIBXSMM_GEMM_PRECISION_F64 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) {
    l_mask = 0xff;
  } else {
    l_mask = 0xffff;
  }
  /* shift right by "inverse" remainder */
  l_mask = l_mask >> i_mask_count;

  /* move mask to GP register */
  libxsmm_x86_instruction_alu_imm( io_generated_code,
                                   LIBXSMM_X86_INSTR_MOVQ,
                                   i_gp_reg_tmp,
                                   l_mask );

  if ( ( io_generated_code->arch >= LIBXSMM_X86_AVX512 ) &&
       ( io_generated_code->arch <= LIBXSMM_X86_ALLFEAT ) ) {
    libxsmm_x86_instruction_mask_move( io_generated_code,
                                       LIBXSMM_X86_INSTR_KMOVW,
                                       i_gp_reg_tmp,
                                       LIBXSMM_X86_AVX512_MASK );
    /* BF16/I8 kernels additionally need a wider mask in opmask register 2 for the
     * narrowed (16-/8-bit element) stores */
    if ( ( LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) &&
         ( LIBXSMM_GEMM_PRECISION_BF16 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) {
      libxsmm_x86_instruction_mask_move( io_generated_code,
                                         LIBXSMM_X86_INSTR_KMOVD,
                                         i_gp_reg_tmp,
                                         2 );
    } else if ( ( LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_INP( i_xgemm_desc->datatype ) ) &&
                ( LIBXSMM_GEMM_PRECISION_I8 == LIBXSMM_GETENUM_OUT( i_xgemm_desc->datatype ) ) ) {
      libxsmm_x86_instruction_mask_move( io_generated_code,
                                         LIBXSMM_X86_INSTR_KMOVQ,
                                         i_gp_reg_tmp,
                                         2 );
    } else {
      /* no additional mask is needed */
    }
  } else {
    /* shouldn't happen: this generator path is only reached for AVX-512-capable targets */
    LIBXSMM_HANDLE_ERROR( io_generated_code, LIBXSMM_ERR_ARCH );
    return;
  }
}
symgs.c
//------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
// Symmetric Gauss-Seidel smoother: performs NUM_SMOOTHS symmetric GS smooths
// on vector phi_id, each consisting of one forward and one backward sweep
// (2*NUM_SMOOTHS sweeps total).  Each sweep refreshes ghost zones and boundary
// conditions, then updates phi in place:  phi += Dinv * (rhs - A*phi).
//
// Parallelism is across boxes only (OpenMP); within a box the lexicographic
// sweep order is inherently sequential ("hard to thread").
//
// NOTE(review): 'a' and 'b' (and the per-box h2inv / alpha / beta_* pointers)
// are not referenced directly in this function body; they are presumably
// consumed by the apply_op_ijk() macro, which expands against these exact
// local identifiers — confirm against the operator header before renaming
// anything here.
void smooth(level_type * level, int phi_id, int rhs_id, double a, double b){
  int box,s;
  for(s=0;s<2*NUM_SMOOTHS;s++){ // there are two sweeps (forward/backward) per GS smooth
    // refresh ghost zones and re-apply boundary conditions so the stencil
    // reads valid neighbor data at box edges for this sweep
    exchange_boundary(level,phi_id,stencil_get_shape());
            apply_BCs(level,phi_id,stencil_get_shape());

    double _timeStart = getTime();
    #ifdef _OPENMP
    #pragma omp parallel for private(box)
    #endif
    for(box=0;box<level->num_my_boxes;box++){
      int i,j,k;
      const int ghosts = level->box_ghosts;
      const int jStride = level->my_boxes[box].jStride;
      const int kStride = level->my_boxes[box].kStride;
      const int dim = level->my_boxes[box].dim;
      const double h2inv = 1.0/(level->h*level->h);
      // offset every vector pointer so that [0] addresses the first non-ghost point
      double * __restrict__ phi = level->my_boxes[box].vectors[ phi_id] + ghosts*(1+jStride+kStride); // i.e. [0] = first non ghost zone point
      const double * __restrict__ rhs = level->my_boxes[box].vectors[ rhs_id] + ghosts*(1+jStride+kStride);
      const double * __restrict__ alpha = level->my_boxes[box].vectors[VECTOR_ALPHA ] + ghosts*(1+jStride+kStride);
      const double * __restrict__ beta_i = level->my_boxes[box].vectors[VECTOR_BETA_I] + ghosts*(1+jStride+kStride);
      const double * __restrict__ beta_j = level->my_boxes[box].vectors[VECTOR_BETA_J] + ghosts*(1+jStride+kStride);
      const double * __restrict__ beta_k = level->my_boxes[box].vectors[VECTOR_BETA_K] + ghosts*(1+jStride+kStride);
      const double * __restrict__ Dinv = level->my_boxes[box].vectors[VECTOR_DINV ] + ghosts*(1+jStride+kStride);
      if( (s&0x1)==0 ){ // forward sweep... hard to thread (GS updates phi in place in sweep order)
        for(k=0;k<dim;k++){
        for(j=0;j<dim;j++){
        for(i=0;i<dim;i++){
          int ijk = i + j*jStride + k*kStride;
          double Ax = apply_op_ijk(phi);
          phi[ijk] = phi[ijk] + Dinv[ijk]*(rhs[ijk]-Ax);
        }}}
      }else{ // backward sweep... hard to thread (reverse order makes the smoother symmetric)
        for(k=dim-1;k>=0;k--){
        for(j=dim-1;j>=0;j--){
        for(i=dim-1;i>=0;i--){
          int ijk = i + j*jStride + k*kStride;
          double Ax = apply_op_ijk(phi);
          phi[ijk] = phi[ijk] + Dinv[ijk]*(rhs[ijk]-Ax);
        }}}
      }
    } // boxes
    level->timers.smooth += (double)(getTime()-_timeStart);
  } // s-loop
}
//------------------------------------------------------------------------------------------------------------------------------
par_mgr.c
/******************************************************************************
 * Copyright (c) 1998 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * Two-grid system solver
 *
 *****************************************************************************/

#include "_hypre_parcsr_ls.h"
#include "par_amg.h"
#include "par_mgr.h"

#ifdef HYPRE_USING_DSUPERLU
#include "dsuperlu.h"
#endif

#if defined(HYPRE_USING_CUDA)
/* Abort helper for MGR options that have no GPU implementation yet:
 * prints which option was requested, then aborts the whole MPI job. */
void hypre_NoGPUSupport(char *option)
{
   char msg[256];
   hypre_sprintf(msg, "Error: Chosen %s option is not currently supported on GPU\n\n", option);
   hypre_printf("%s ", msg);
   // hypre_error_w_msg(1, msg);
   hypre_MPI_Abort(hypre_MPI_COMM_WORLD, -1);
}
#endif

/* Create
 *
 * Allocates a hypre_ParMGRData object and initializes every field to its
 * default (NULL for all arrays/solvers that are built during setup, plus the
 * default solver parameters).  Returned as an opaque void* handle; pair with
 * hypre_MGRDestroy(). */
void *
hypre_MGRCreate()
{
   hypre_ParMGRData *mgr_data;

   mgr_data = hypre_CTAlloc(hypre_ParMGRData, 1, HYPRE_MEMORY_HOST);

   /* block data */
   (mgr_data -> block_size) = 1;
   (mgr_data -> block_num_coarse_indexes) = NULL;
   (mgr_data -> point_marker_array) = NULL;
   (mgr_data -> block_cf_marker) = NULL;

   /* general data */
   (mgr_data -> max_num_coarse_levels) = 10;
   (mgr_data -> A_array) = NULL;
#if defined(HYPRE_USING_CUDA)
   (mgr_data -> P_FF_array) = NULL;
#endif
   (mgr_data -> P_array) = NULL;
   (mgr_data -> RT_array) = NULL;
   (mgr_data -> RAP) = NULL;
   (mgr_data -> CF_marker_array) = NULL;
   (mgr_data -> coarse_indices_lvls) = NULL;

   /* F-relaxation (A_ff) hierarchy and solver hooks */
   (mgr_data -> A_ff_array) = NULL;
   (mgr_data -> F_fine_array) = NULL;
   (mgr_data -> U_fine_array) = NULL;
   (mgr_data -> aff_solver) = NULL;
   (mgr_data -> fine_grid_solver_setup) = NULL;
   (mgr_data -> fine_grid_solver_solve) = NULL;

   (mgr_data -> F_array) = NULL;
   (mgr_data -> U_array) = NULL;
   (mgr_data -> residual) = NULL;
   (mgr_data -> rel_res_norms) = NULL;
   (mgr_data -> Vtemp) = NULL;
   (mgr_data -> Ztemp) = NULL;
   (mgr_data -> Utemp) = NULL;
   (mgr_data -> Ftemp) = NULL;

   (mgr_data -> num_iterations) = 0;
   (mgr_data -> num_interp_sweeps) = 1;
   (mgr_data -> num_restrict_sweeps) = 1;
   (mgr_data -> trunc_factor) = 0.0;
   (mgr_data -> max_row_sum) = 0.9;
   (mgr_data -> strong_threshold) = 0.25;
   (mgr_data -> P_max_elmts) = 0;

   (mgr_data -> coarse_grid_solver) = NULL;
   (mgr_data -> coarse_grid_solver_setup) = NULL;
   (mgr_data -> coarse_grid_solver_solve) = NULL;
   (mgr_data -> global_smoother) = NULL;
   (mgr_data -> use_default_cgrid_solver) = 1;
   (mgr_data -> fsolver_mode) = -1; // set to -1 to avoid printing when not used
   (mgr_data -> omega) = 1.;
   (mgr_data -> max_iter) = 20;
   (mgr_data -> tol) = 1.0e-6;
   (mgr_data -> relax_type) = 0;
   (mgr_data -> relax_order) = 1; // not fully utilized. Only used to compute L1-norms.
   (mgr_data -> interp_type) = NULL;
   (mgr_data -> restrict_type) = NULL;
   (mgr_data -> num_relax_sweeps) = 1;
   (mgr_data -> relax_weight) = 1.0;

   (mgr_data -> logging) = 0;
   (mgr_data -> print_level) = 0;
   (mgr_data -> frelax_print_level) = 0;
   (mgr_data -> cg_print_level) = 0;

   (mgr_data -> l1_norms) = NULL;

   (mgr_data -> reserved_coarse_size) = 0;
   (mgr_data -> reserved_coarse_indexes) = NULL;
   (mgr_data -> reserved_Cpoint_local_indexes) = NULL;

   (mgr_data -> diaginv) = NULL;
   (mgr_data -> global_smooth_iters) = 1;
   (mgr_data -> global_smooth_type) = 0;

   (mgr_data -> set_non_Cpoints_to_F) = 0;
   (mgr_data -> idx_array) = NULL;

   (mgr_data -> Frelax_method) = NULL;
   (mgr_data -> VcycleRelaxVtemp) = NULL;
   (mgr_data -> VcycleRelaxZtemp) = NULL;
   (mgr_data -> FrelaxVcycleData) = NULL;
   (mgr_data -> Frelax_num_functions) = NULL;
   (mgr_data -> max_local_lvls) = 10;

   (mgr_data -> use_non_galerkin_cg) = NULL;
   (mgr_data -> print_coarse_system) = 0;
   (mgr_data -> set_c_points_method) = 0;
   (mgr_data -> lvl_to_keep_cpoints) = 0;
   (mgr_data -> cg_convergence_factor) = 0.0;
   (mgr_data -> truncate_coarse_grid_threshold) = 0.0;

   return (void *) mgr_data;
}
/*-------------------------------------------------------------------------- *--------------------------------------------------------------------------*/ /* Destroy */ HYPRE_Int hypre_MGRDestroy( void *data ) { hypre_ParMGRData * mgr_data = (hypre_ParMGRData*) data; HYPRE_Int i; HYPRE_Int num_coarse_levels = (mgr_data -> num_coarse_levels); /* block info data */ if ((mgr_data -> block_cf_marker)) { for (i = 0; i < (mgr_data -> max_num_coarse_levels); i++) { if ((mgr_data -> block_cf_marker)[i]) { hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST); } } hypre_TFree((mgr_data -> block_cf_marker), HYPRE_MEMORY_HOST); (mgr_data -> block_cf_marker) = NULL; } if (mgr_data -> block_num_coarse_indexes) { hypre_TFree(mgr_data -> block_num_coarse_indexes, HYPRE_MEMORY_HOST); (mgr_data -> block_num_coarse_indexes) = NULL; } /* final residual vector */ if ((mgr_data -> residual)) { hypre_ParVectorDestroy( (mgr_data -> residual) ); (mgr_data -> residual) = NULL; } if ((mgr_data -> rel_res_norms)) { hypre_TFree( (mgr_data -> rel_res_norms), HYPRE_MEMORY_HOST); (mgr_data -> rel_res_norms) = NULL; } /* temp vectors for solve phase */ if ((mgr_data -> Vtemp)) { hypre_ParVectorDestroy( (mgr_data -> Vtemp) ); (mgr_data -> Vtemp) = NULL; } if ((mgr_data -> Ztemp)) { hypre_ParVectorDestroy( (mgr_data -> Ztemp) ); (mgr_data -> Ztemp) = NULL; } if ((mgr_data -> Utemp)) { hypre_ParVectorDestroy( (mgr_data -> Utemp) ); (mgr_data -> Utemp) = NULL; } if ((mgr_data -> Ftemp)) { hypre_ParVectorDestroy( (mgr_data -> Ftemp) ); (mgr_data -> Ftemp) = NULL; } /* coarse grid solver */ if ((mgr_data -> use_default_cgrid_solver)) { if ((mgr_data -> coarse_grid_solver)) { hypre_BoomerAMGDestroy( (mgr_data -> coarse_grid_solver) ); } (mgr_data -> coarse_grid_solver) = NULL; } /* l1_norms */ if ((mgr_data -> l1_norms)) { for (i = 0; i < (num_coarse_levels); i++) { hypre_SeqVectorDestroy((mgr_data -> l1_norms)[i]); } hypre_TFree((mgr_data -> l1_norms), HYPRE_MEMORY_HOST); } /* 
coarse_indices_lvls */ if ((mgr_data -> coarse_indices_lvls)) { for (i = 0; i < (num_coarse_levels); i++) if ((mgr_data -> coarse_indices_lvls)[i]) { hypre_TFree((mgr_data -> coarse_indices_lvls)[i], HYPRE_MEMORY_HOST); } hypre_TFree((mgr_data -> coarse_indices_lvls), HYPRE_MEMORY_HOST); } /* linear system and cf marker array */ if (mgr_data -> A_array || mgr_data -> P_array || mgr_data -> RT_array || mgr_data -> CF_marker_array) { for (i = 1; i < num_coarse_levels + 1; i++) { hypre_ParVectorDestroy((mgr_data -> F_array)[i]); hypre_ParVectorDestroy((mgr_data -> U_array)[i]); if ((mgr_data -> P_array)[i - 1]) { hypre_ParCSRMatrixDestroy((mgr_data -> P_array)[i - 1]); } if ((mgr_data -> RT_array)[i - 1]) { hypre_ParCSRMatrixDestroy((mgr_data -> RT_array)[i - 1]); } hypre_IntArrayDestroy(mgr_data -> CF_marker_array[i - 1]); } for (i = 1; i < (num_coarse_levels); i++) { if ((mgr_data -> A_array)[i]) { hypre_ParCSRMatrixDestroy((mgr_data -> A_array)[i]); } } } #if defined(HYPRE_USING_CUDA) if (mgr_data -> P_FF_array) { for (i = 0; i < num_coarse_levels; i++) { if ((mgr_data -> P_array)[i]) { hypre_ParCSRMatrixDestroy((mgr_data -> P_FF_array)[i]); } } //hypre_TFree(P_FF_array, hypre_HandleMemoryLocation(hypre_handle())); hypre_TFree((mgr_data -> P_FF_array), HYPRE_MEMORY_HOST); } #endif /* AMG for Frelax */ if (mgr_data -> A_ff_array || mgr_data -> F_fine_array || mgr_data -> U_fine_array) { for (i = 1; i < num_coarse_levels + 1; i++) { if (mgr_data -> F_fine_array[i]) { hypre_ParVectorDestroy((mgr_data -> F_fine_array)[i]); } if (mgr_data -> U_fine_array[i]) { hypre_ParVectorDestroy((mgr_data -> U_fine_array)[i]); } } for (i = 1; i < (num_coarse_levels); i++) { if ((mgr_data -> A_ff_array)[i]) { hypre_ParCSRMatrixDestroy((mgr_data -> A_ff_array)[i]); } } if (mgr_data -> fsolver_mode > 0) { if ((mgr_data -> A_ff_array)[0]) { hypre_ParCSRMatrixDestroy((mgr_data -> A_ff_array)[0]); } } hypre_TFree(mgr_data -> F_fine_array, HYPRE_MEMORY_HOST); (mgr_data -> F_fine_array) = 
NULL; hypre_TFree(mgr_data -> U_fine_array, HYPRE_MEMORY_HOST); (mgr_data -> U_fine_array) = NULL; hypre_TFree(mgr_data -> A_ff_array, HYPRE_MEMORY_HOST); (mgr_data -> A_ff_array) = NULL; } if (mgr_data -> aff_solver) { for (i = 1; i < (num_coarse_levels); i++) { if ((mgr_data -> aff_solver)[i]) { hypre_BoomerAMGDestroy((mgr_data -> aff_solver)[i]); } } if (mgr_data -> fsolver_mode == 2) { if ((mgr_data -> aff_solver)[0]) { hypre_BoomerAMGDestroy((mgr_data -> aff_solver)[0]); } } hypre_TFree(mgr_data -> aff_solver, HYPRE_MEMORY_HOST); (mgr_data -> aff_solver) = NULL; } if ((mgr_data -> F_array)) { hypre_TFree((mgr_data -> F_array), HYPRE_MEMORY_HOST); (mgr_data -> F_array) = NULL; } if ((mgr_data -> U_array)) { hypre_TFree((mgr_data -> U_array), HYPRE_MEMORY_HOST); (mgr_data -> U_array) = NULL; } if ((mgr_data -> A_array)) { hypre_TFree((mgr_data -> A_array), HYPRE_MEMORY_HOST); (mgr_data -> A_array) = NULL; } if ((mgr_data -> P_array)) { hypre_TFree((mgr_data -> P_array), HYPRE_MEMORY_HOST); (mgr_data -> P_array) = NULL; } if ((mgr_data -> RT_array)) { hypre_TFree((mgr_data -> RT_array), HYPRE_MEMORY_HOST); (mgr_data -> RT_array) = NULL; } if ((mgr_data -> CF_marker_array)) { hypre_TFree((mgr_data -> CF_marker_array), HYPRE_MEMORY_HOST); (mgr_data -> CF_marker_array) = NULL; } if ((mgr_data -> reserved_Cpoint_local_indexes)) { hypre_TFree((mgr_data -> reserved_Cpoint_local_indexes), HYPRE_MEMORY_HOST); (mgr_data -> reserved_Cpoint_local_indexes) = NULL; } if (mgr_data -> restrict_type) { hypre_TFree(mgr_data -> restrict_type, HYPRE_MEMORY_HOST); (mgr_data -> restrict_type) = NULL; } if (mgr_data -> interp_type) { hypre_TFree(mgr_data -> interp_type, HYPRE_MEMORY_HOST); (mgr_data -> interp_type) = NULL; } /* Frelax_method */ if (mgr_data -> Frelax_method) { hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST); (mgr_data -> Frelax_method) = NULL; } /* Frelax_num_functions */ if (mgr_data -> Frelax_num_functions) { hypre_TFree(mgr_data -> Frelax_num_functions, 
HYPRE_MEMORY_HOST); (mgr_data -> Frelax_num_functions) = NULL; } /* data for V-cycle F-relaxation */ if ((mgr_data -> VcycleRelaxVtemp)) { hypre_ParVectorDestroy( (mgr_data -> VcycleRelaxVtemp) ); (mgr_data -> VcycleRelaxVtemp) = NULL; } if ((mgr_data -> VcycleRelaxZtemp)) { hypre_ParVectorDestroy( (mgr_data -> VcycleRelaxZtemp) ); (mgr_data -> VcycleRelaxZtemp) = NULL; } if (mgr_data -> FrelaxVcycleData) { for (i = 0; i < num_coarse_levels; i++) { if ((mgr_data -> FrelaxVcycleData)[i]) { hypre_MGRDestroyFrelaxVcycleData((mgr_data -> FrelaxVcycleData)[i]); (mgr_data -> FrelaxVcycleData)[i] = NULL; } } hypre_TFree(mgr_data -> FrelaxVcycleData, HYPRE_MEMORY_HOST); (mgr_data -> FrelaxVcycleData) = NULL; } /* data for reserved coarse nodes */ if (mgr_data -> reserved_coarse_indexes) { hypre_TFree(mgr_data -> reserved_coarse_indexes, HYPRE_MEMORY_HOST); (mgr_data -> reserved_coarse_indexes) = NULL; } /* index array for setting Cpoints by global block */ if ((mgr_data -> set_c_points_method) == 1) { hypre_TFree(mgr_data -> idx_array, HYPRE_MEMORY_HOST); (mgr_data -> idx_array) = NULL; } /* array for setting option to use non-Galerkin coarse grid */ if (mgr_data -> use_non_galerkin_cg) { hypre_TFree(mgr_data -> use_non_galerkin_cg, HYPRE_MEMORY_HOST); (mgr_data -> use_non_galerkin_cg) = NULL; } /* coarse level matrix - RAP */ if ((mgr_data -> RAP)) { hypre_ParCSRMatrixDestroy((mgr_data -> RAP)); } if ((mgr_data -> diaginv)) { hypre_TFree((mgr_data -> diaginv), HYPRE_MEMORY_HOST); } if ((mgr_data -> global_smoother)) { if (mgr_data -> global_smooth_type == 8) { HYPRE_EuclidDestroy((mgr_data -> global_smoother)); } else if (mgr_data -> global_smooth_type == 16) { HYPRE_ILUDestroy((mgr_data -> global_smoother)); } } /* mgr data */ hypre_TFree(mgr_data, HYPRE_MEMORY_HOST); return hypre_error_flag; } /* Create data for V-cycle F-relaxtion */ void * hypre_MGRCreateFrelaxVcycleData() { hypre_ParAMGData *vdata = hypre_CTAlloc(hypre_ParAMGData, 1, HYPRE_MEMORY_HOST); 
hypre_ParAMGDataAArray(vdata) = NULL;
   hypre_ParAMGDataPArray(vdata) = NULL;
   hypre_ParAMGDataFArray(vdata) = NULL;
   hypre_ParAMGDataCFMarkerArray(vdata) = NULL;
   hypre_ParAMGDataVtemp(vdata) = NULL;
   hypre_ParAMGDataAMat(vdata) = NULL;
   hypre_ParAMGDataBVec(vdata) = NULL;
   hypre_ParAMGDataZtemp(vdata) = NULL;
   hypre_ParAMGDataCommInfo(vdata) = NULL;
   hypre_ParAMGDataUArray(vdata) = NULL;
   hypre_ParAMGDataNewComm(vdata) = hypre_MPI_COMM_NULL;
   hypre_ParAMGDataNumLevels(vdata) = 0;
   hypre_ParAMGDataMaxLevels(vdata) = 10;
   hypre_ParAMGDataNumFunctions(vdata) = 1;
   hypre_ParAMGDataSCommPkgSwitch(vdata) = 1.0;
   hypre_ParAMGDataRelaxOrder(vdata) = 1;
   hypre_ParAMGDataMaxCoarseSize(vdata) = 9;
   hypre_ParAMGDataMinCoarseSize(vdata) = 0;
   hypre_ParAMGDataUserCoarseRelaxType(vdata) = 9;
   return (void *) vdata;
}

/* Destroy data for V-cycle F-relaxation
 *
 * Frees the lightweight hypre_ParAMGData hierarchy built for V-cycle
 * F-relaxation.  Vtemp/Ztemp are deliberately NOT destroyed here: they alias
 * VcycleRelaxVtemp/VcycleRelaxZtemp of the owning MGR object (see comments
 * below).  Level 0 of the A/F/U arrays aliases MGR-owned data, so loops
 * start at i = 1. */
HYPRE_Int
hypre_MGRDestroyFrelaxVcycleData( void *data )
{
   hypre_ParAMGData * vdata = (hypre_ParAMGData*) data;
   HYPRE_Int i;
   HYPRE_Int num_levels = hypre_ParAMGDataNumLevels(vdata);
   MPI_Comm new_comm = hypre_ParAMGDataNewComm(vdata);

   hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata)[0], HYPRE_MEMORY_HOST);
   for (i = 1; i < num_levels + 1; i++)
   {
      if (hypre_ParAMGDataAArray(vdata)[i])
      {
         hypre_ParCSRMatrixDestroy(hypre_ParAMGDataAArray(vdata)[i]);
      }

      if (hypre_ParAMGDataPArray(vdata)[i - 1])
      {
         hypre_ParCSRMatrixDestroy(hypre_ParAMGDataPArray(vdata)[i - 1]);
      }

      hypre_IntArrayDestroy(hypre_ParAMGDataCFMarkerArray(vdata)[i - 1]);
      hypre_ParVectorDestroy(hypre_ParAMGDataFArray(vdata)[i]);
      hypre_ParVectorDestroy(hypre_ParAMGDataUArray(vdata)[i]);
      hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata)[i], HYPRE_MEMORY_HOST);
   }
   /* if no coarsening happened, the only CF marker is at level 0 */
   if (num_levels < 1)
   {
      hypre_IntArrayDestroy(hypre_ParAMGDataCFMarkerArray(vdata)[0]);
   }

   /* Points to VcycleRelaxVtemp of mgr_data, which is already destroyed */
   //hypre_ParVectorDestroy(hypre_ParAMGDataVtemp(vdata));
   hypre_TFree(hypre_ParAMGDataFArray(vdata), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParAMGDataUArray(vdata), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParAMGDataAArray(vdata), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParAMGDataPArray(vdata), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata), HYPRE_MEMORY_HOST);
   //hypre_TFree(hypre_ParAMGDataGridRelaxType(vdata), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParAMGDataDofFuncArray(vdata), HYPRE_MEMORY_HOST);

   /* Points to VcycleRelaxZtemp of mgr_data, which is already destroyed */
   /*
   if (hypre_ParAMGDataZtemp(vdata))
      hypre_ParVectorDestroy(hypre_ParAMGDataZtemp(vdata));
   */

   if (hypre_ParAMGDataAMat(vdata))
   {
      hypre_TFree(hypre_ParAMGDataAMat(vdata), HYPRE_MEMORY_HOST);
   }
   if (hypre_ParAMGDataBVec(vdata))
   {
      hypre_TFree(hypre_ParAMGDataBVec(vdata), HYPRE_MEMORY_HOST);
   }
   if (hypre_ParAMGDataCommInfo(vdata))
   {
      hypre_TFree(hypre_ParAMGDataCommInfo(vdata), HYPRE_MEMORY_HOST);
   }

   if (new_comm != hypre_MPI_COMM_NULL)
   {
      hypre_MPI_Comm_free (&new_comm);
   }

   hypre_TFree(vdata, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/* Set C-point variables for each reduction level */
/* Currently not implemented */
HYPRE_Int
hypre_MGRSetReductionLevelCpoints( void      *mgr_vdata,
                                   HYPRE_Int  nlevels,
                                   HYPRE_Int *num_coarse_points,
                                   HYPRE_Int  **level_coarse_indexes)
{
   hypre_ParMGRData   *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   /* NOTE(review): stores the caller's arrays directly (no copy) — the caller
    * must keep them alive for the lifetime of the MGR object. */
   (mgr_data -> num_coarse_levels) = nlevels;
   (mgr_data -> num_coarse_per_level) = num_coarse_points;
   (mgr_data -> level_coarse_indexes) = level_coarse_indexes;
   return hypre_error_flag;
}

/* Initialize some data */
/* Set whether non-coarse points on each level should be explicitly tagged as F-points */
HYPRE_Int
hypre_MGRSetNonCpointsToFpoints( void *mgr_vdata, HYPRE_Int nonCptToFptFlag)
{
   hypre_ParMGRData   *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> set_non_Cpoints_to_F) = nonCptToFptFlag;
   return hypre_error_flag;
}

/* Set whether the reserved C points are reduced before the coarse grid solve */
HYPRE_Int
hypre_MGRSetReservedCpointsLevelToKeep(void *mgr_vdata, HYPRE_Int level)
{
   hypre_ParMGRData   *mgr_data = (hypre_ParMGRData*) mgr_vdata;
(mgr_data -> lvl_to_keep_cpoints) = level;
   return hypre_error_flag;
}

/* Set Cpoints by contiguous blocks, i.e. p1, p2, ..., pn, s1, s2, ..., sn, ...
 *
 * Copies begin_idx_array (length block_size) into an MGR-owned idx_array,
 * delegates the CF-marker bookkeeping to hypre_MGRSetCpointsByBlock, then
 * records method 1 ("by contiguous block").  Note the delegate resets
 * set_c_points_method to 0, so it must be re-set to 1 afterwards. */
HYPRE_Int
hypre_MGRSetCpointsByContiguousBlock( void      *mgr_vdata,
                                      HYPRE_Int  block_size,
                                      HYPRE_Int  max_num_levels,
                                      HYPRE_BigInt  *begin_idx_array,
                                      HYPRE_Int  *block_num_coarse_points,
                                      HYPRE_Int  **block_coarse_indexes)
{
   hypre_ParMGRData   *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_Int i;
   /* drop any previously stored index array before replacing it */
   if ((mgr_data -> idx_array) != NULL)
   {
      hypre_TFree(mgr_data -> idx_array, HYPRE_MEMORY_HOST);
      (mgr_data -> idx_array) = NULL;
   }
   HYPRE_BigInt *index_array = hypre_CTAlloc(HYPRE_BigInt, block_size, HYPRE_MEMORY_HOST);
   if (begin_idx_array != NULL)
   {
      for (i = 0; i < block_size; i++)
      {
         index_array[i] = *(begin_idx_array + i);
      }
   }
   hypre_MGRSetCpointsByBlock(mgr_data, block_size, max_num_levels, block_num_coarse_points,
                              block_coarse_indexes);
   (mgr_data -> idx_array) = index_array;
   (mgr_data -> set_c_points_method) = 1;
   return hypre_error_flag;
}

/* Initialize/ set local block data information
 *
 * Builds per-level CF markers of length block_size: every entry starts as an
 * F-point (FMRK) and the entries listed in block_coarse_indexes[level] are
 * flipped to C-points (CMRK).  Also copies block_num_coarse_points into an
 * MGR-owned array and records method 0 ("by block"). */
HYPRE_Int
hypre_MGRSetCpointsByBlock( void      *mgr_vdata,
                            HYPRE_Int  block_size,
                            HYPRE_Int  max_num_levels,
                            HYPRE_Int  *block_num_coarse_points,
                            HYPRE_Int  **block_coarse_indexes)
{
   HYPRE_Int  i, j;
   HYPRE_Int  **block_cf_marker = NULL;
   HYPRE_Int *block_num_coarse_indexes = NULL;

   hypre_ParMGRData   *mgr_data = (hypre_ParMGRData*) mgr_vdata;

   /* free block cf_marker data if not previously destroyed */
   if ((mgr_data -> block_cf_marker) != NULL)
   {
      for (i = 0; i < (mgr_data -> max_num_coarse_levels); i++)
      {
         if ((mgr_data -> block_cf_marker)[i])
         {
            hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST);
            (mgr_data -> block_cf_marker)[i] = NULL;
         }
      }
      hypre_TFree(mgr_data -> block_cf_marker, HYPRE_MEMORY_HOST);
      (mgr_data -> block_cf_marker) = NULL;
   }
   if ((mgr_data -> block_num_coarse_indexes))
   {
      hypre_TFree((mgr_data -> block_num_coarse_indexes), HYPRE_MEMORY_HOST);
      (mgr_data -> block_num_coarse_indexes) = NULL;
   }

   /* store block cf_marker */
   block_cf_marker = hypre_CTAlloc(HYPRE_Int *,  max_num_levels, HYPRE_MEMORY_HOST);
   for (i = 0; i < max_num_levels; i++)
   {
      block_cf_marker[i] = hypre_CTAlloc(HYPRE_Int, block_size, HYPRE_MEMORY_HOST);
      /* NOTE(review): memset fills BYTES, not ints — this only produces the
       * value FMRK in each HYPRE_Int if FMRK's byte pattern is uniform
       * (e.g. -1 or 0); confirm FMRK's definition in par_mgr.h. */
      memset(block_cf_marker[i], FMRK, block_size * sizeof(HYPRE_Int));
   }
   for (i = 0; i < max_num_levels; i++)
   {
      for (j = 0; j < block_num_coarse_points[i]; j++)
      {
         (block_cf_marker[i])[block_coarse_indexes[i][j]] = CMRK;
      }
   }

   /* store block_num_coarse_points */
   if (max_num_levels > 0)
   {
      block_num_coarse_indexes = hypre_CTAlloc(HYPRE_Int, max_num_levels, HYPRE_MEMORY_HOST);
      for (i = 0; i < max_num_levels; i++)
      {
         block_num_coarse_indexes[i] = block_num_coarse_points[i];
      }
   }
   /* set block data */
   (mgr_data -> max_num_coarse_levels) = max_num_levels;
   (mgr_data -> block_size) = block_size;
   (mgr_data -> block_num_coarse_indexes) = block_num_coarse_indexes;
   (mgr_data -> block_cf_marker) = block_cf_marker;
   (mgr_data -> set_c_points_method) = 0;

   return hypre_error_flag;
}

/* Set C-points using a user-supplied point-marker array (method 2).
 * Unlike hypre_MGRSetCpointsByBlock, block_cf_marker here stores the marker
 * VALUES from lvl_coarse_indexes directly (compared later against
 * point_marker_array), not a CMRK/FMRK flag per block position. */
HYPRE_Int
hypre_MGRSetCpointsByPointMarkerArray( void      *mgr_vdata,
                                       HYPRE_Int  block_size,
                                       HYPRE_Int  max_num_levels,
                                       HYPRE_Int  *lvl_num_coarse_points,
                                       HYPRE_Int  **lvl_coarse_indexes,
                                       HYPRE_Int  *point_marker_array)
{
   hypre_ParMGRData   *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_Int  i, j;
   HYPRE_Int  **block_cf_marker = NULL;
   HYPRE_Int *block_num_coarse_indexes = NULL;

   /* free block cf_marker data if not previously destroyed */
   if ((mgr_data -> block_cf_marker) != NULL)
   {
      for (i = 0; i < (mgr_data -> max_num_coarse_levels); i++)
      {
         if ((mgr_data -> block_cf_marker)[i])
         {
            hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST);
            (mgr_data -> block_cf_marker)[i] = NULL;
         }
      }
      hypre_TFree(mgr_data -> block_cf_marker, HYPRE_MEMORY_HOST);
      (mgr_data -> block_cf_marker) = NULL;
   }
   if ((mgr_data -> block_num_coarse_indexes))
   {
      hypre_TFree((mgr_data -> block_num_coarse_indexes), HYPRE_MEMORY_HOST);
      (mgr_data -> block_num_coarse_indexes) = NULL;
   }

   /* store block cf_marker */
   block_cf_marker = hypre_CTAlloc(HYPRE_Int *,  max_num_levels,
HYPRE_MEMORY_HOST);
   for (i = 0; i < max_num_levels; i++)
   {
      block_cf_marker[i] = hypre_CTAlloc(HYPRE_Int, block_size, HYPRE_MEMORY_HOST);
      /* NOTE(review): memset fills BYTES, not ints — only yields FMRK in each
       * HYPRE_Int if FMRK's byte pattern is uniform; confirm in par_mgr.h. */
      memset(block_cf_marker[i], FMRK, block_size * sizeof(HYPRE_Int));
   }
   /* store the marker VALUES for each level (method 2 semantics) */
   for (i = 0; i < max_num_levels; i++)
   {
      for (j = 0; j < lvl_num_coarse_points[i]; j++)
      {
         block_cf_marker[i][j] = lvl_coarse_indexes[i][j];
      }
   }

   /* store block_num_coarse_points */
   if (max_num_levels > 0)
   {
      block_num_coarse_indexes = hypre_CTAlloc(HYPRE_Int, max_num_levels, HYPRE_MEMORY_HOST);
      for (i = 0; i < max_num_levels; i++)
      {
         block_num_coarse_indexes[i] = lvl_num_coarse_points[i];
      }
   }
   /* set block data */
   (mgr_data -> max_num_coarse_levels) = max_num_levels;
   (mgr_data -> block_size) = block_size;
   (mgr_data -> block_num_coarse_indexes) = block_num_coarse_indexes;
   (mgr_data -> block_cf_marker) = block_cf_marker;
   (mgr_data -> point_marker_array) = point_marker_array;
   (mgr_data -> set_c_points_method) = 2;

   return hypre_error_flag;
}

/*Set number of points that remain part of the coarse grid throughout the hierarchy
 *
 * Copies the caller's global indices (reserved_cpt_index, length
 * reserved_coarse_size) into an MGR-owned array.  A negative size is an
 * argument error; size 0 simply clears any previously reserved nodes. */
HYPRE_Int
hypre_MGRSetReservedCoarseNodes(void      *mgr_vdata,
                                HYPRE_Int  reserved_coarse_size,
                                HYPRE_BigInt  *reserved_cpt_index)
{
   hypre_ParMGRData   *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_BigInt *reserved_coarse_indexes = NULL;
   HYPRE_Int i;

   if (!mgr_data)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Warning! MGR object empty!\n");
      return hypre_error_flag;
   }

   if (reserved_coarse_size < 0)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }
   /* free data not previously destroyed */
   if ((mgr_data -> reserved_coarse_indexes))
   {
      hypre_TFree((mgr_data -> reserved_coarse_indexes), HYPRE_MEMORY_HOST);
      (mgr_data -> reserved_coarse_indexes) = NULL;
   }

   /* set reserved coarse nodes */
   if (reserved_coarse_size > 0)
   {
      reserved_coarse_indexes = hypre_CTAlloc(HYPRE_BigInt, reserved_coarse_size, HYPRE_MEMORY_HOST);
      for (i = 0; i < reserved_coarse_size; i++)
      {
         reserved_coarse_indexes[i] = reserved_cpt_index[i];
      }
   }
   (mgr_data -> reserved_coarse_size) = reserved_coarse_size;
   (mgr_data -> reserved_coarse_indexes) = reserved_coarse_indexes;

   return hypre_error_flag;
}

/* Set CF marker array
 *
 * Produces the CF splitting for one MGR reduction level.  If cflag is set
 * (last level) the splitting is exactly the fixed coarse set; otherwise a
 * standard coarsening is run first and then corrected so every fixed coarse
 * index is a C-point and all remaining points are plain F-points. */
HYPRE_Int
hypre_MGRCoarsen(hypre_ParCSRMatrix *S,
                 hypre_ParCSRMatrix *A,
                 HYPRE_Int fixed_coarse_size,
                 HYPRE_Int *fixed_coarse_indexes,
                 HYPRE_Int debug_flag,
                 hypre_IntArray **CF_marker_ptr,
                 HYPRE_Int cflag)
{
   HYPRE_Int *CF_marker = NULL;
   HYPRE_Int *cindexes = fixed_coarse_indexes;
   HYPRE_Int i, row, nc;
   HYPRE_Int nloc = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));

   /* If this is the last level, coarsen onto fixed coarse set */
   if (cflag)
   {
      if (*CF_marker_ptr != NULL)
      {
         hypre_IntArrayDestroy(*CF_marker_ptr);
      }
      *CF_marker_ptr = hypre_IntArrayCreate(nloc);
      hypre_IntArrayInitialize(*CF_marker_ptr);
      hypre_IntArraySetConstantValues(*CF_marker_ptr, FMRK);
      CF_marker = hypre_IntArrayData(*CF_marker_ptr);

      /* first mark fixed coarse set */
      nc = fixed_coarse_size;
      for (i = 0; i < nc; i++)
      {
         CF_marker[cindexes[i]] = CMRK;
      }
   }
   else
   {
      /* First coarsen to get initial CF splitting.
       * This is then followed by updating the CF marker to pass
       * coarse information to the next levels.
NOTE: It may be
       * convenient to implement this way (allows the use of multiple
       * coarsening strategies without changing too much code),
       * but not necessarily the best option, compared to initializing
       * CF_marker first and then coarsening on subgraph which excludes
       * the initialized coarse nodes. */
      hypre_BoomerAMGCoarsen(S, A, 0, debug_flag, CF_marker_ptr);
      CF_marker = hypre_IntArrayData(*CF_marker_ptr);

      /* Update CF_marker to correct Cpoints marked as Fpoints. */
      nc = fixed_coarse_size;
      for (i = 0; i < nc; i++)
      {
         CF_marker[cindexes[i]] = CMRK;
      }
      /* set F-points to FMRK. This is necessary since the different coarsening schemes differentiate
       * between type of F-points (example Ruge coarsening). We do not need that distinction here.
       */
      for (row = 0; row < nloc; row++)
      {
         if (CF_marker[row] == CMRK) { continue; }
         CF_marker[row] = FMRK;
      }
#if 0
      /* IMPORTANT: Update coarse_indexes array to define the positions of the fixed coarse points
       * in the next level.
       */
      nc = 0;
      index_i = 0;
      for (row = 0; row < nloc; row++)
      {
         /* loop through new c-points */
         if (CF_marker[row] == CMRK) { nc++; }
         else if (CF_marker[row] == S_CMRK)
         {
            /* previously marked c-point is part of fixed coarse set. Track its current local index */
            cindexes[index_i++] = nc;
            /* reset c-point from S_CMRK to CMRK */
            cf_marker[row] = CMRK;
            nc++;
         }
         /* set F-points to FMRK. This is necessary since the different coarsening schemes differentiate
          * between type of F-points (example Ruge coarsening). We do not need that distinction here.
          */
         else
         {
            CF_marker[row] = FMRK;
         }
      }
      /* check if this should be last level */
      if ( nc == fixed_coarse_size)
      {
         last_level = 1;
      }
      //printf(" nc = %d and fixed coarse size = %d \n", nc, fixed_coarse_size);
#endif
   }
   return hypre_error_flag;
}

/* Extends the F-point-only matrix W into the full prolongation P on the host.
 *
 * C-points get an identity row (single 1.0 at their coarse index); F-points
 * copy their row of W.  Fills the CSR arrays P_diag_i/j/data and the offd row
 * pointer P_offd_i; all output arrays must be pre-allocated by the caller.
 * Only row POINTERS are produced for the offd part (column/data entries are
 * handled by the caller). */
HYPRE_Int
hypre_ExtendWtoPHost(HYPRE_Int      P_nr_of_rows,
                     HYPRE_Int     *CF_marker,
                     HYPRE_Int     *W_diag_i,
                     HYPRE_Int     *W_diag_j,
                     HYPRE_Complex *W_diag_data,
                     HYPRE_Int     *P_diag_i,
                     HYPRE_Int     *P_diag_j,
                     HYPRE_Complex *P_diag_data,
                     HYPRE_Int     *W_offd_i,
                     HYPRE_Int     *P_offd_i )
{
   HYPRE_Int              jj_counter, jj_counter_offd;
   HYPRE_Int              start_indexing = 0; /* start indexing for P_data at 0 */

   HYPRE_Int             *fine_to_coarse = NULL;
   HYPRE_Int              coarse_counter;

   HYPRE_Int              i, jj;

   HYPRE_Real       one  = 1.0;

   /*-----------------------------------------------------------------------
    *  Initialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, P_nr_of_rows, HYPRE_MEMORY_HOST);

   for (i = 0; i < P_nr_of_rows; i++) { fine_to_coarse[i] = -1; }

   /*-----------------------------------------------------------------------
    *  Loop over fine grid.
    *-----------------------------------------------------------------------*/
   HYPRE_Int row_counter = 0;
   coarse_counter = 0;
   for (i = 0; i < P_nr_of_rows; i++)
   {
      /*--------------------------------------------------------------------
       *  If i is a C-point, interpolation is the identity. Also set up
       *  mapping vector (fine index -> coarse index).
       *--------------------------------------------------------------------*/
      if (CF_marker[i] > 0)
      {
         fine_to_coarse[i] = coarse_counter;
         coarse_counter++;
      }
   }

   /*-----------------------------------------------------------------------
    *  Initialize some stuff.
    *-----------------------------------------------------------------------*/
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   row_counter = 0;
   for (i = 0; i < P_nr_of_rows; i++)
   {
      /*--------------------------------------------------------------------
       *  If i is a c-point, interpolation is the identity.
       *--------------------------------------------------------------------*/
      if (CF_marker[i] >= 0)
      {
         P_diag_i[i] = jj_counter;
         P_diag_j[jj_counter]    = fine_to_coarse[i];
         P_diag_data[jj_counter] = one;
         jj_counter++;
      }
      /*--------------------------------------------------------------------
       *  If i is an F-point, build interpolation (copy the W row).
       *--------------------------------------------------------------------*/
      else
      {
         /* Diagonal part of P */
         P_diag_i[i] = jj_counter;
         for (jj = W_diag_i[row_counter]; jj < W_diag_i[row_counter + 1]; jj++)
         {
            //P_marker[row_counter] = jj_counter;
            P_diag_j[jj_counter]    = W_diag_j[jj];
            P_diag_data[jj_counter] = W_diag_data[jj];
            jj_counter++;
         }

         /* Off-Diagonal part of P: only the row COUNT is carried over here */
         P_offd_i[i] = jj_counter_offd;
         jj_counter_offd += W_offd_i[row_counter + 1] - W_offd_i[row_counter];

         row_counter++;
      }
      /* update off-diagonal row pointer */
      P_offd_i[i + 1] = jj_counter_offd;
   }
   P_diag_i[P_nr_of_rows] = jj_counter;

   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   return 0;
}

/* Interpolation for MGR - Adapted from BoomerAMGBuildInterp */
HYPRE_Int
hypre_MGRBuildPHost( hypre_ParCSRMatrix   *A,
                     HYPRE_Int            *CF_marker,
                     HYPRE_BigInt         *num_cpts_global,
                     HYPRE_Int             method,
                     hypre_ParCSRMatrix  **P_ptr)
{
   MPI_Comm                comm = hypre_ParCSRMatrixComm(A);
   HYPRE_Int               num_procs, my_id;
   HYPRE_Int               A_nr_of_rows = hypre_ParCSRMatrixNumRows(A);

   hypre_ParCSRMatrix     *A_FF = NULL, *A_FC = NULL, *P = NULL;
   hypre_CSRMatrix        *W_diag = NULL, *W_offd = NULL;
   HYPRE_Int               P_diag_nnz, nfpoints;
   HYPRE_Int              *P_diag_i = NULL, *P_diag_j = NULL, *P_offd_i = NULL;
   HYPRE_Complex          *P_diag_data = NULL, *diag = NULL, *diag1 = NULL;
   HYPRE_BigInt            nC_global;
   HYPRE_Int               i;

   HYPRE_MemoryLocation memory_location_P =
hypre_ParCSRMatrixMemoryLocation(A); hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); nfpoints = 0; for (i = 0; i < A_nr_of_rows; i++) { if (CF_marker[i] == -1) { nfpoints++; } } if (method > 0) { hypre_ParCSRMatrixGenerateFFFC(A, CF_marker, num_cpts_global, NULL, &A_FC, &A_FF); diag = hypre_CTAlloc(HYPRE_Complex, nfpoints, memory_location_P); if (method == 1) { // extract diag inverse sqrt // hypre_CSRMatrixExtractDiagonalHost(hypre_ParCSRMatrixDiag(A_FF), diag, 3); // L1-Jacobi-type interpolation HYPRE_Complex scal = 1.0; diag1 = hypre_CTAlloc(HYPRE_Complex, nfpoints, memory_location_P); hypre_CSRMatrixExtractDiagonalHost(hypre_ParCSRMatrixDiag(A_FF), diag, 0); hypre_CSRMatrixComputeRowSumHost(hypre_ParCSRMatrixDiag(A_FF), NULL, NULL, diag1, 1, 1.0, "set"); hypre_CSRMatrixComputeRowSumHost(hypre_ParCSRMatrixDiag(A_FC), NULL, NULL, diag1, 1, 1.0, "add"); hypre_CSRMatrixComputeRowSumHost(hypre_ParCSRMatrixOffd(A_FF), NULL, NULL, diag1, 1, 1.0, "add"); hypre_CSRMatrixComputeRowSumHost(hypre_ParCSRMatrixOffd(A_FC), NULL, NULL, diag1, 1, 1.0, "add"); for (i = 0; i < nfpoints; i++) { HYPRE_Complex dsum = diag[i] + scal * (diag1[i] - hypre_cabs(diag[i])); diag[i] = 1. 
/ dsum; } hypre_TFree(diag1, memory_location_P); } else if (method == 2) { // extract diag inverse hypre_CSRMatrixExtractDiagonalHost(hypre_ParCSRMatrixDiag(A_FF), diag, 2); } for (i = 0; i < nfpoints; i++) { diag[i] = -diag[i]; } hypre_Vector *D_FF_inv = hypre_SeqVectorCreate(nfpoints); hypre_VectorData(D_FF_inv) = diag; hypre_SeqVectorInitialize_v2(D_FF_inv, memory_location_P); hypre_CSRMatrixDiagScale(hypre_ParCSRMatrixDiag(A_FC), D_FF_inv, NULL); hypre_CSRMatrixDiagScale(hypre_ParCSRMatrixOffd(A_FC), D_FF_inv, NULL); hypre_SeqVectorDestroy(D_FF_inv); W_diag = hypre_ParCSRMatrixDiag(A_FC); W_offd = hypre_ParCSRMatrixOffd(A_FC); nC_global = hypre_ParCSRMatrixGlobalNumCols(A_FC); } else { W_diag = hypre_CSRMatrixCreate(nfpoints, A_nr_of_rows - nfpoints, 0); W_offd = hypre_CSRMatrixCreate(nfpoints, 0, 0); hypre_CSRMatrixInitialize_v2(W_diag, 0, memory_location_P); hypre_CSRMatrixInitialize_v2(W_offd, 0, memory_location_P); if (my_id == (num_procs - 1)) { nC_global = num_cpts_global[1]; } hypre_MPI_Bcast(&nC_global, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm); } /* Construct P from matrix product W_diag */ P_diag_nnz = hypre_CSRMatrixNumNonzeros(W_diag) + hypre_CSRMatrixNumCols(W_diag); P_diag_i = hypre_CTAlloc(HYPRE_Int, A_nr_of_rows + 1, memory_location_P); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_nnz, memory_location_P); P_diag_data = hypre_CTAlloc(HYPRE_Complex, P_diag_nnz, memory_location_P); P_offd_i = hypre_CTAlloc(HYPRE_Int, A_nr_of_rows + 1, memory_location_P); /* Extend W data to P data */ hypre_ExtendWtoPHost( A_nr_of_rows, CF_marker, hypre_CSRMatrixI(W_diag), hypre_CSRMatrixJ(W_diag), hypre_CSRMatrixData(W_diag), P_diag_i, P_diag_j, P_diag_data, hypre_CSRMatrixI(W_offd), P_offd_i ); // finalize P P = hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A), hypre_ParCSRMatrixGlobalNumRows(A), nC_global, hypre_ParCSRMatrixColStarts(A), num_cpts_global, hypre_CSRMatrixNumCols(W_offd), P_diag_nnz, hypre_CSRMatrixNumNonzeros(W_offd) ); 
hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(P)) = memory_location_P; hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(P)) = memory_location_P; hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(P)) = P_diag_i; hypre_CSRMatrixJ(hypre_ParCSRMatrixDiag(P)) = P_diag_j; hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(P)) = P_diag_data; hypre_CSRMatrixI(hypre_ParCSRMatrixOffd(P)) = P_offd_i; hypre_CSRMatrixJ(hypre_ParCSRMatrixOffd(P)) = hypre_CSRMatrixJ(W_offd); hypre_CSRMatrixData(hypre_ParCSRMatrixOffd(P)) = hypre_CSRMatrixData(W_offd); hypre_CSRMatrixJ(W_offd) = NULL; hypre_CSRMatrixData(W_offd) = NULL; if (method > 0) { hypre_ParCSRMatrixColMapOffd(P) = hypre_ParCSRMatrixColMapOffd(A_FC); hypre_ParCSRMatrixColMapOffd(P) = hypre_ParCSRMatrixColMapOffd(A_FC); hypre_ParCSRMatrixColMapOffd(A_FC) = NULL; hypre_ParCSRMatrixColMapOffd(A_FC) = NULL; hypre_ParCSRMatrixNumNonzeros(P) = hypre_ParCSRMatrixNumNonzeros( A_FC) + hypre_ParCSRMatrixGlobalNumCols(A_FC); } else { hypre_ParCSRMatrixNumNonzeros(P) = nC_global; } hypre_ParCSRMatrixDNumNonzeros(P) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(P); hypre_MatvecCommPkgCreate(P); *P_ptr = P; if (A_FF) { hypre_ParCSRMatrixDestroy(A_FF); } if (A_FC) { hypre_ParCSRMatrixDestroy(A_FC); } if (method <= 0) { hypre_CSRMatrixDestroy(W_diag); hypre_CSRMatrixDestroy(W_offd); } return hypre_error_flag; } /* Interpolation for MGR - Adapted from BoomerAMGBuildInterp */ HYPRE_Int hypre_MGRBuildP( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int method, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real 
*A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Real *a_diag; /* reciprocal diagonal of A for F-point rows */

   hypre_ParCSRMatrix *P;
   HYPRE_BigInt *col_map_offd_P;
   HYPRE_Int *tmp_map_offd = NULL;
   HYPRE_Int *CF_marker_offd = NULL;

   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;

   HYPRE_Real *P_diag_data;
   HYPRE_Int *P_diag_i;
   HYPRE_Int *P_diag_j;
   HYPRE_Real *P_offd_data;
   HYPRE_Int *P_offd_i;
   HYPRE_Int *P_offd_j;

   HYPRE_Int P_diag_size, P_offd_size;

   HYPRE_Int *P_marker, *P_marker_offd;

   HYPRE_Int jj_counter, jj_counter_offd;
   HYPRE_Int *jj_count, *jj_count_offd;
   // HYPRE_Int jj_begin_row,jj_begin_row_offd;
   // HYPRE_Int jj_end_row,jj_end_row_offd;

   HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */

   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag);

   HYPRE_Int *fine_to_coarse;
   //HYPRE_BigInt *fine_to_coarse_offd;
   HYPRE_Int *coarse_counter;
   HYPRE_Int coarse_shift;
   HYPRE_BigInt total_global_cpts;
   //HYPRE_BigInt my_first_cpt;
   HYPRE_Int num_cols_P_offd;

   HYPRE_Int i, i1;
   HYPRE_Int j, jl, jj;
   HYPRE_Int start;

   HYPRE_Real one = 1.0;

   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   HYPRE_Int num_threads;
   HYPRE_Int num_sends;
   HYPRE_Int index;
   HYPRE_Int ns, ne, size, rest;
   HYPRE_Int *int_buf_data;

   HYPRE_Real wall_time; /* for debugging instrumentation */

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   //num_threads = hypre_NumThreads();
   // Temporary fix, disable threading
   // TODO: enable threading
   num_threads = 1;

   /* last rank knows the global C-point count; broadcast it */
   //my_first_cpt = num_cpts_global[0];
   if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; }
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);

   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/
   if (debug_flag < 0)
   {
      debug_flag = -debug_flag;
   }
   if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }

   if (num_cols_A_offd)
   {
      CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
   }

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);

   /* pack the local CF markers referenced by each send map entry */
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
      {
         int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
      }
   }
   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   if (debug_flag == 4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    * Intialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);

   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; }

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/

   /* RDF: this looks a little tricky, but doable */
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (j = 0; j < num_threads; j++)
   {
      /* carve the [ns, ne) chunk of rows handled by thread j */
      size = n_fine / num_threads;
      rest = n_fine - size * num_threads;
      if (j < rest)
      {
         ns = j * size + j;
         ne = (j + 1) * size + j + 1;
      }
      else
      {
         ns = j * size + rest;
         ne = (j + 1) * size + rest;
      }
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a C-point, interpolation is the identity. Also set up
          * mapping vector.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            jj_count[j]++;
            fine_to_coarse[i] = coarse_counter[j];
            coarse_counter[j]++;
         }
         /*--------------------------------------------------------------------
          * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc}
          * (with method == 0, F rows get no entries at all)
          *--------------------------------------------------------------------*/
         else
         {
            for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)
            {
               i1 = A_diag_j[jj];
               if ((CF_marker[i1] >= 0) && (method > 0)) { jj_count[j]++; }
            }
            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
               {
                  i1 = A_offd_j[jj];
                  if ((CF_marker_offd[i1] >= 0) && (method > 0)) { jj_count_offd[j]++; }
               }
            }
         }
      }
   }

   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/
   /* prefix-sum the per-thread counters */
   for (i = 0; i < num_threads - 1; i++)
   {
      coarse_counter[i + 1] += coarse_counter[i];
      jj_count[i + 1] += jj_count[i];
      jj_count_offd[i + 1] += jj_count_offd[i];
   }
   i = num_threads - 1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];

   P_diag_size = jj_counter;
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_DEVICE);
   P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE);
   P_diag_i[n_fine] = jj_counter;

   P_offd_size = jj_counter_offd;
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_DEVICE);
   P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE);

   /*-----------------------------------------------------------------------
    * Intialize some stuff.
    *-----------------------------------------------------------------------*/
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   if (debug_flag == 4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/
   if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }

   //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_HOST);

#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
#endif
   /* shift each thread's local coarse numbering to a global-local one */
   for (j = 0; j < num_threads; j++)
   {
      coarse_shift = 0;
      if (j > 0) { coarse_shift = coarse_counter[j - 1]; }
      size = n_fine / num_threads;
      rest = n_fine - size * num_threads;
      if (j < rest)
      {
         ns = j * size + j;
         ne = (j + 1) * size + j + 1;
      }
      else
      {
         ns = j * size + rest;
         ne = (j + 1) * size + rest;
      }
      for (i = ns; i < ne; i++)
      {
         fine_to_coarse[i] += coarse_shift;
      }
   }

   /* index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         big_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]+ my_first_cpt;
   }
   comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data, fine_to_coarse_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);
   */

   if (debug_flag == 4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time);
      fflush(NULL);
   }

   if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); }

#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   //for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;

   /*-----------------------------------------------------------------------
    * Loop over fine grid points.
    *-----------------------------------------------------------------------*/
   /* cache 1/a_ii for every F-point row */
   a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST);
   for (i = 0; i < n_fine; i++)
   {
      if (CF_marker[i] < 0)
      {
         for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)
         {
            i1 = A_diag_j[jj];
            if ( i == i1 ) /* diagonal of A only */
            {
               a_diag[i] = 1.0 / A_diag_data[jj];
            }
         }
      }
   }

#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (jl = 0; jl < num_threads; jl++)
   {
      size = n_fine / num_threads;
      rest = n_fine - size * num_threads;
      if (jl < rest)
      {
         ns = jl * size + jl;
         ne = (jl + 1) * size + jl + 1;
      }
      else
      {
         ns = jl * size + rest;
         ne = (jl + 1) * size + rest;
      }
      /* resume counters where the previous thread's chunk ended */
      jj_counter = 0;
      if (jl > 0) { jj_counter = jj_count[jl - 1]; }
      jj_counter_offd = 0;
      if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; }

      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
      if (num_cols_A_offd)
      {
         P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
      }
      else
      {
         P_marker_offd = NULL;
      }

      for (i = 0; i < n_fine; i++) { P_marker[i] = -1; }
      for (i = 0; i < num_cols_A_offd; i++) { P_marker_offd[i] = -1; }

      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            P_diag_i[i] = jj_counter;
            P_diag_j[jj_counter] = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }
         /*--------------------------------------------------------------------
          * If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/
         else
         {
            /* Diagonal part of P */
            P_diag_i[i] = jj_counter;
            for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++)
            {
               i1 = A_diag_j[jj];
               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/
               if ((CF_marker[i1] >= 0) && (method > 0))
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter] = fine_to_coarse[i1];
                  /*
                  if(method == 0)
                  {
                     P_diag_data[jj_counter] = 0.0;
                  }
                  */
                  if (method == 1)
                  {
                     P_diag_data[jj_counter] = - A_diag_data[jj];
                  }
                  else if (method == 2)
                  {
                     P_diag_data[jj_counter] = - A_diag_data[jj] * a_diag[i];
                  }
                  jj_counter++;
               }
            }
            /* Off-Diagonal part of P */
            P_offd_i[i] = jj_counter_offd;
            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++)
               {
                  i1 = A_offd_j[jj];
                  /*-----------------------------------------------------------
                   * If neighbor i1 is a C-point, set column number in P_offd_j
                   * and initialize interpolation weight to zero.
                   *-----------------------------------------------------------*/
                  if ((CF_marker_offd[i1] >= 0) && (method > 0))
                  {
                     P_marker_offd[i1] = jj_counter_offd;
                     /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
                     P_offd_j[jj_counter_offd] = i1;
                     /*
                     if(method == 0)
                     {
                        P_offd_data[jj_counter_offd] = 0.0;
                     }
                     */
                     if (method == 1)
                     {
                        P_offd_data[jj_counter_offd] = - A_offd_data[jj];
                     }
                     else if (method == 2)
                     {
                        P_offd_data[jj_counter_offd] = - A_offd_data[jj] * a_diag[i];
                     }
                     jj_counter_offd++;
                  }
               }
            }
         }
         P_offd_i[i + 1] = jj_counter_offd;
      }
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
      hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(a_diag, HYPRE_MEMORY_HOST);

   P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;

   /* compress P's off-diagonal column space to the columns actually used */
   num_cols_P_offd = 0;
   if (P_offd_size)
   {
      P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
      for (i = 0; i < num_cols_A_offd; i++) { P_marker[i] = 0; }

      num_cols_P_offd = 0;
      for (i = 0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            num_cols_P_offd++;
            P_marker[index] = 1;
         }
      }

      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
      index = 0;
      for (i = 0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index] == 0) { index++; }
         tmp_map_offd[i] = index++;
      }
#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
      /* renumber P's off-diagonal columns into the compressed map */
      for (i = 0; i < P_offd_size; i++)
         P_offd_j[i] =
hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd);
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }

   /* reset marker value -3 back to plain F (-1); -3 appears to be an
    * intermediate F-point tag set by the coarsening -- TODO confirm */
   for (i = 0; i < n_fine; i++)
      if (CF_marker[i] == -3) { CF_marker[i] = -1; }
   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }
   hypre_GetCommPkgRTFromCommPkgA(P, A, fine_to_coarse, tmp_map_offd);

   *P_ptr = P;

   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   //hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);

   return (0);
}

/* Interpolation for MGR - Dynamic Row Sum method
 * (same overall structure as hypre_MGRBuildP; F-point weights use the
 * dynamic row-sum scaling -- see the assembly loops below)
 */
HYPRE_Int
hypre_MGRBuildPDRS( hypre_ParCSRMatrix *A,
                    HYPRE_Int *CF_marker,
                    HYPRE_BigInt *num_cpts_global,
                    HYPRE_Int blk_size,
                    HYPRE_Int reserved_coarse_size,
                    HYPRE_Int debug_flag,
                    hypre_ParCSRMatrix **P_ptr)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Real *a_diag;

   hypre_ParCSRMatrix *P;
   HYPRE_BigInt *col_map_offd_P;
   HYPRE_Int *tmp_map_offd;
   HYPRE_Int *CF_marker_offd = NULL;

   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;

   HYPRE_Real *P_diag_data;
   HYPRE_Int *P_diag_i;
   HYPRE_Int *P_diag_j;
   HYPRE_Real *P_offd_data;
   HYPRE_Int *P_offd_i;
   HYPRE_Int *P_offd_j;

   HYPRE_Int P_diag_size,
P_offd_size; HYPRE_Int *P_marker, *P_marker_offd; HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; // HYPRE_Int jj_begin_row,jj_begin_row_offd; // HYPRE_Int jj_end_row,jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int *fine_to_coarse; //HYPRE_BigInt *fine_to_coarse_offd; HYPRE_Int *coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; //HYPRE_BigInt my_first_cpt; HYPRE_Int num_cols_P_offd; HYPRE_Int i, i1; HYPRE_Int j, jl, jj; HYPRE_Int start; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); //num_threads = hypre_NumThreads(); // Temporary fix, disable threading // TODO: enable threading num_threads = 1; //my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; } hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm); /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag < 0) { debug_flag = -debug_flag; } if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); } if (num_cols_A_offd) { CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); } if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < 
hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; } jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. 
*-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { size = n_fine / num_threads; rest = n_fine - size * num_threads; if (j < rest) { ns = j * size + j; ne = (j + 1) * size + j + 1; } else { ns = j * size + rest; ne = (j + 1) * size + rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc} *--------------------------------------------------------------------*/ else { for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++) { i1 = A_diag_j[jj]; if (CF_marker[i1] >= 0) { jj_count[j]++; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++) { i1 = A_offd_j[jj]; if (CF_marker_offd[i1] >= 0) { jj_count_offd[j]++; } } } } /*-------------------------------------------------------------------- * Set up the indexes for the DRS method *--------------------------------------------------------------------*/ } } /*----------------------------------------------------------------------- * Allocate arrays. 
*-----------------------------------------------------------------------*/ for (i = 0; i < num_threads - 1; i++) { coarse_counter[i + 1] += coarse_counter[i]; jj_count[i + 1] += jj_count[i]; jj_count_offd[i + 1] += jj_count_offd[i]; } i = num_threads - 1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_HOST); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_HOST); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. 
*-----------------------------------------------------------------------*/ if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); } //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) { coarse_shift = coarse_counter[j - 1]; } size = n_fine / num_threads; rest = n_fine - size * num_threads; if (j < rest) { ns = j * size + j; ne = (j + 1) * size + j + 1; } else { ns = j * size + rest; ne = (j + 1) * size + rest; } for (i = ns; i < ne; i++) { fine_to_coarse[i] += coarse_shift; } } /*index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); */ if (debug_flag == 4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time); fflush(NULL); } if (debug_flag == 4) { wall_time = time_getWallclockSeconds(); } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif //for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; /*----------------------------------------------------------------------- * Loop over fine grid points. 
*-----------------------------------------------------------------------*/ a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++) { i1 = A_diag_j[jj]; if ( i == i1 ) /* diagonal of A only */ { a_diag[i] = 1.0 / A_diag_data[jj]; } } } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE #endif #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine / num_threads; rest = n_fine - size * num_threads; if (jl < rest) { ns = jl * size + jl; ne = (jl + 1) * size + jl + 1; } else { ns = jl * size + rest; ne = (jl + 1) * size + rest; } jj_counter = 0; if (jl > 0) { jj_counter = jj_count[jl - 1]; } jj_counter_offd = 0; if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; } P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); if (num_cols_A_offd) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); } else { P_marker_offd = NULL; } for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } for (i = 0; i < num_cols_A_offd; i++) { P_marker_offd[i] = -1; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. 
*--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++) { i1 = A_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = - A_diag_data[jj] * a_diag[i]; jj_counter++; } } /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++) { i1 = A_offd_j[jj]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. *-----------------------------------------------------------*/ if (CF_marker_offd[i1] >= 0) { P_marker_offd[i1] = jj_counter_offd; /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/ P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = - A_offd_data[jj] * a_diag[i]; jj_counter_offd++; } } } } P_offd_i[i + 1] = jj_counter_offd; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } hypre_TFree(a_diag, HYPRE_MEMORY_HOST); P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, 
HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
      for (i = 0; i < num_cols_A_offd; i++)
      {
         P_marker[i] = 0;
      }
      /* Count the distinct off-diagonal columns actually referenced by P */
      num_cols_P_offd = 0;
      for (i = 0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            num_cols_P_offd++;
            P_marker[index] = 1;
         }
      }
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      /* tmp_map_offd[i] = i-th referenced column (ascending) in A's offd numbering */
      index = 0;
      for (i = 0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index] == 0)
         {
            index++;
         }
         tmp_map_offd[i] = index++;
      }
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
      /* Compress P's offd column indices into the packed numbering */
      for (i = 0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd);
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }
   /* Restore CF_marker entries that were temporarily flagged as -3 */
   for (i = 0; i < n_fine; i++)
      if (CF_marker[i] == -3) { CF_marker[i] = -1; }
   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }
   hypre_GetCommPkgRTFromCommPkgA(P, A, fine_to_coarse, tmp_map_offd);
   *P_ptr = P;
   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   // hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
   return (0);
}

/* Left-scale a ParCSR matrix in place: A := diag(vector) * A,
 * i.e. row i of both the diag and offd parts is multiplied by vector[i].
 * vector : per-local-row scale factors (length = local number of rows)
 * A      : the target ParCSR matrix, modified in place
 * Returns 0. */
HYPRE_Int
hypre_ParCSRMatrixLeftScale(HYPRE_Real *vector, hypre_ParCSRMatrix *A)
{
   HYPRE_Int i, j, n_local;
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   n_local = hypre_CSRMatrixNumRows(A_diag);
   for (i = 0; i < n_local; i++)
   {
      HYPRE_Real factor = vector[i];
      for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++)
      {
         A_diag_data[j] *= factor;
      }
      for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++)
      {
         A_offd_data[j] *= factor;
      }
   }
   return (0);
}

/************************************************************
 * Compute a non-Galerkin coarse grid for MGR.
 * Available methods:
 * 0: inv(A_FF) approximated by its diagonal inverse
 * 1: inv(A_FF) approximated by sparse approximate inverse
 *************************************************************/
HYPRE_Int
hypre_MGRComputeNonGalerkinCoarseGrid(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *P,
                                      hypre_ParCSRMatrix *RT, HYPRE_Int bsize,
                                      HYPRE_Int ordering, HYPRE_Int method, HYPRE_Int Pmax,
                                      HYPRE_Int keep_stencil, HYPRE_Int *CF_marker,
                                      hypre_ParCSRMatrix **A_h_ptr)
{
   HYPRE_Int *c_marker, *f_marker;
   HYPRE_Int n_local_fine_grid, i, i1, jj;
   hypre_ParCSRMatrix *A_cc;
   hypre_ParCSRMatrix *A_ff;
   hypre_ParCSRMatrix *A_fc;
   hypre_ParCSRMatrix *A_cf;
   hypre_ParCSRMatrix *A_h;
   hypre_ParCSRMatrix *A_h_correction;
   HYPRE_Int max_elmts = Pmax;
   // HYPRE_Real wall_time = 0.;
   hypre_ParCSRMatrix *P_mod = NULL;
   HYPRE_Int my_id;
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_MPI_Comm_rank(comm, &my_id);
   HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(A);
   n_local_fine_grid = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   c_marker = hypre_CTAlloc(HYPRE_Int, n_local_fine_grid, HYPRE_MEMORY_HOST);
   f_marker = hypre_CTAlloc(HYPRE_Int, n_local_fine_grid, HYPRE_MEMORY_HOST);
   /* CF_marker is +1 (C-point) / -1 (F-point); f_marker is its negation */
   for (i = 0; i < n_local_fine_grid; i++)
   {
      HYPRE_Int point_type = CF_marker[i];
      hypre_assert(point_type == 1 || point_type == -1);
      c_marker[i] = point_type;
      f_marker[i] = -point_type;
   }
   // get the A_cc sub-block
   hypre_MGRGetSubBlock(A, c_marker, c_marker, 0, &A_cc);
   if (method == 0)
   {
      if (keep_stencil)
      {
         //wall_time = time_getWallclockSeconds();
         hypre_MGRGetSubBlock(A, c_marker, f_marker, 0, &A_cf);
hypre_MGRGetSubBlock(A, f_marker, c_marker, 0, &A_fc); hypre_MGRGetSubBlock(A, f_marker, f_marker, 0, &A_ff); // extract the diagonal of A_ff and compute D_ff_inv hypre_CSRMatrix *A_ff_diag = hypre_ParCSRMatrixDiag(A_ff); HYPRE_Real *A_ff_diag_data = hypre_CSRMatrixData(A_ff_diag); HYPRE_Int *A_ff_diag_i = hypre_CSRMatrixI(A_ff_diag); HYPRE_Int *A_ff_diag_j = hypre_CSRMatrixJ(A_ff_diag); HYPRE_Int n_local_fpoints = hypre_CSRMatrixNumRows(A_ff_diag); HYPRE_Real *D_ff_inv; D_ff_inv = hypre_CTAlloc(HYPRE_Real, n_local_fpoints, HYPRE_MEMORY_HOST); for (i = 0; i < n_local_fpoints; i++) { for (jj = A_ff_diag_i[i]; jj < A_ff_diag_i[i + 1]; jj++) { i1 = A_ff_diag_j[jj]; if ( i == i1 ) { D_ff_inv[i] = -1.0 / A_ff_diag_data[jj]; } } } // extract the diagonal of A_cf hypre_CSRMatrix *A_cf_diag = hypre_ParCSRMatrixDiag(A_cf); HYPRE_Real *A_cf_diag_data = hypre_CSRMatrixData(A_cf_diag); HYPRE_Int *A_cf_diag_i = hypre_CSRMatrixI(A_cf_diag); HYPRE_Int *A_cf_diag_j = hypre_CSRMatrixJ(A_cf_diag); n_local_fpoints = hypre_CSRMatrixNumRows(A_cf_diag); HYPRE_Real *D_cf; D_cf = hypre_CTAlloc(HYPRE_Real, n_local_fpoints, HYPRE_MEMORY_HOST); for (i = 0; i < n_local_fpoints; i++) { i1 = A_cf_diag_j[A_cf_diag_i[i]]; D_cf[i] = A_cf_diag_data[jj]; } // compute the triple product hypre_ParCSRMatrixLeftScale(D_ff_inv, A_fc); hypre_ParCSRMatrixLeftScale(D_cf, A_fc); A_h_correction = A_fc; hypre_TFree(D_cf, HYPRE_MEMORY_HOST); hypre_TFree(D_ff_inv, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDestroy(A_ff); hypre_ParCSRMatrixDestroy(A_cf); //wall_time = time_getWallclockSeconds() - wall_time; //hypre_printf("Compute triple product D_cf * D_ff_inv * A_fc time: %1.5f\n", wall_time); } else { //wall_time = time_getWallclockSeconds(); P_mod = hypre_ParCSRMatrixCompleteClone(P); hypre_ParCSRMatrixCopy(P, P_mod, 1); HYPRE_Int n_local_rows = hypre_ParCSRMatrixNumRows(P_mod); hypre_CSRMatrix *P_mod_diag = hypre_ParCSRMatrixDiag(P_mod); HYPRE_Int *P_mod_diag_i = hypre_CSRMatrixI(P_mod_diag); HYPRE_Real 
*P_mod_diag_data = hypre_CSRMatrixData(P_mod_diag); for (i = 0; i < n_local_rows; i ++) { if (CF_marker[i] >= 0) { HYPRE_Int ii = P_mod_diag_i[i]; P_mod_diag_data[ii] = 0.0; } } hypre_BoomerAMGBuildCoarseOperator(RT, A, P_mod, &A_h_correction); //wall_time = time_getWallclockSeconds() - wall_time; //hypre_printf("Compute triple product time new: %1.5f\n", wall_time); hypre_ParCSRMatrixDestroy(P_mod); } } else { // Approximate inverse for ideal interploation hypre_MGRGetSubBlock(A, c_marker, f_marker, 0, &A_cf); hypre_MGRGetSubBlock(A, f_marker, c_marker, 0, &A_fc); hypre_MGRGetSubBlock(A, f_marker, f_marker, 0, &A_ff); hypre_ParCSRMatrix *A_ff_inv = NULL; hypre_ParCSRMatrix *minus_Wp = NULL; hypre_MGRApproximateInverse(A_ff, &A_ff_inv); minus_Wp = hypre_ParMatmul(A_ff_inv, A_fc); A_h_correction = hypre_ParMatmul(A_cf, minus_Wp); hypre_ParCSRMatrixDestroy(minus_Wp); hypre_ParCSRMatrixDestroy(A_ff); hypre_ParCSRMatrixDestroy(A_fc); hypre_ParCSRMatrixDestroy(A_cf); } // perform dropping for A_h_correction // specific to multiphase poromechanics // we only keep the diagonal of each block //wall_time = time_getWallclockSeconds(); HYPRE_Int n_local_cpoints = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A_h_correction)); hypre_CSRMatrix *A_h_correction_diag = hypre_ParCSRMatrixDiag(A_h_correction); HYPRE_Real *A_h_correction_diag_data = hypre_CSRMatrixData(A_h_correction_diag); HYPRE_Int *A_h_correction_diag_i = hypre_CSRMatrixI(A_h_correction_diag); HYPRE_Int *A_h_correction_diag_j = hypre_CSRMatrixJ(A_h_correction_diag); HYPRE_Int ncol_diag = hypre_CSRMatrixNumCols(A_h_correction_diag); hypre_CSRMatrix *A_h_correction_offd = hypre_ParCSRMatrixOffd(A_h_correction); HYPRE_Real *A_h_correction_offd_data = hypre_CSRMatrixData(A_h_correction_offd); HYPRE_Int *A_h_correction_offd_i = hypre_CSRMatrixI(A_h_correction_offd); HYPRE_Int *A_h_correction_offd_j = hypre_CSRMatrixJ(A_h_correction_offd); if (Pmax > 0) { if (ordering == 0) // interleaved ordering { HYPRE_Int 
*A_h_correction_diag_i_new = hypre_CTAlloc(HYPRE_Int, n_local_cpoints + 1, memory_location);
         HYPRE_Int *A_h_correction_diag_j_new = hypre_CTAlloc(HYPRE_Int, (bsize + max_elmts) * n_local_cpoints, memory_location);
         HYPRE_Complex *A_h_correction_diag_data_new = hypre_CTAlloc(HYPRE_Complex, (bsize + max_elmts) * n_local_cpoints, memory_location);
         HYPRE_Int num_nonzeros_diag_new = 0;
         HYPRE_Int *A_h_correction_offd_i_new = hypre_CTAlloc(HYPRE_Int, n_local_cpoints + 1, memory_location);
         HYPRE_Int *A_h_correction_offd_j_new = hypre_CTAlloc(HYPRE_Int, max_elmts * n_local_cpoints, memory_location);
         HYPRE_Complex *A_h_correction_offd_data_new = hypre_CTAlloc(HYPRE_Complex, max_elmts * n_local_cpoints, memory_location);
         HYPRE_Int num_nonzeros_offd_new = 0;
         /* Per row: keep all entries inside the row's bsize-wide diagonal
          * block, plus (below) up to max_elmts largest-magnitude entries
          * from outside that block */
         for (i = 0; i < n_local_cpoints; i++)
         {
            HYPRE_Int max_num_nonzeros = A_h_correction_diag_i[i + 1] - A_h_correction_diag_i[i] + A_h_correction_offd_i[i + 1] - A_h_correction_offd_i[i];
            HYPRE_Int *aux_j = hypre_CTAlloc(HYPRE_Int, max_num_nonzeros, HYPRE_MEMORY_HOST);
            HYPRE_Real *aux_data = hypre_CTAlloc(HYPRE_Real, max_num_nonzeros, HYPRE_MEMORY_HOST);
            HYPRE_Int row_start = i - (i % bsize); /* first row/col of this bsize block */
            HYPRE_Int row_stop = row_start + bsize - 1;
            HYPRE_Int cnt = 0;
            /* Gather offd entries (shifted past the diag column range) and
             * diag entries into one auxiliary list */
            for (jj = A_h_correction_offd_i[i]; jj < A_h_correction_offd_i[i + 1]; jj++)
            {
               aux_j[cnt] = A_h_correction_offd_j[jj] + ncol_diag;
               aux_data[cnt] = A_h_correction_offd_data[jj];
               cnt++;
            }
            for (jj = A_h_correction_diag_i[i]; jj < A_h_correction_diag_i[i + 1]; jj++)
            {
               aux_j[cnt] = A_h_correction_diag_j[jj];
               aux_data[cnt] = A_h_correction_diag_data[jj];
               cnt++;
            }
            /* Sort by decreasing magnitude for the max_elmts selection below */
            hypre_qsort2_abs(aux_j, aux_data, 0, cnt - 1);
            /* Always keep entries inside the row's diagonal block */
            for (jj = A_h_correction_diag_i[i]; jj < A_h_correction_diag_i[i + 1]; jj++)
            {
               i1 = A_h_correction_diag_j[jj];
               if (i1 >= row_start && i1 <= row_stop)
               {
                  // copy data to new arrays
                  A_h_correction_diag_j_new[num_nonzeros_diag_new] = i1;
                  A_h_correction_diag_data_new[num_nonzeros_diag_new] = A_h_correction_diag_data[jj];
                  ++num_nonzeros_diag_new;
               }
               else
               {
                  // Do nothing
               }
            }
            /* Additionally keep up to max_elmts largest out-of-block entries */
            if (max_elmts > 0)
            {
               for (jj = 0; jj < hypre_min(max_elmts, cnt); jj++)
               {
                  HYPRE_Int col_idx = aux_j[jj];
                  HYPRE_Real col_value = aux_data[jj];
                  if (col_idx < ncol_diag && (col_idx < row_start || col_idx > row_stop))
                  {
                     A_h_correction_diag_j_new[num_nonzeros_diag_new] = col_idx;
                     A_h_correction_diag_data_new[num_nonzeros_diag_new] = col_value;
                     ++num_nonzeros_diag_new;
                  }
                  else if (col_idx >= ncol_diag)
                  {
                     A_h_correction_offd_j_new[num_nonzeros_offd_new] = col_idx - ncol_diag;
                     A_h_correction_offd_data_new[num_nonzeros_offd_new] = col_value;
                     ++num_nonzeros_offd_new;
                  }
               }
            }
            A_h_correction_diag_i_new[i + 1] = num_nonzeros_diag_new;
            A_h_correction_offd_i_new[i + 1] = num_nonzeros_offd_new;
            hypre_TFree(aux_j, HYPRE_MEMORY_HOST);
            hypre_TFree(aux_data, HYPRE_MEMORY_HOST);
         }
         /* Swap the truncated arrays into A_h_correction, freeing the originals */
         hypre_TFree(A_h_correction_diag_i, memory_location);
         hypre_TFree(A_h_correction_diag_j, memory_location);
         hypre_TFree(A_h_correction_diag_data, memory_location);
         hypre_CSRMatrixI(A_h_correction_diag) = A_h_correction_diag_i_new;
         hypre_CSRMatrixJ(A_h_correction_diag) = A_h_correction_diag_j_new;
         hypre_CSRMatrixData(A_h_correction_diag) = A_h_correction_diag_data_new;
         hypre_CSRMatrixNumNonzeros(A_h_correction_diag) = num_nonzeros_diag_new;
         if (A_h_correction_offd_i) { hypre_TFree(A_h_correction_offd_i, memory_location); }
         if (A_h_correction_offd_j) { hypre_TFree(A_h_correction_offd_j, memory_location); }
         if (A_h_correction_offd_data) { hypre_TFree(A_h_correction_offd_data, memory_location); }
         hypre_CSRMatrixI(A_h_correction_offd) = A_h_correction_offd_i_new;
         hypre_CSRMatrixJ(A_h_correction_offd) = A_h_correction_offd_j_new;
         hypre_CSRMatrixData(A_h_correction_offd) = A_h_correction_offd_data_new;
         hypre_CSRMatrixNumNonzeros(A_h_correction_offd) = num_nonzeros_offd_new;
      }
      else
      {
         /* NOTE(review): exit(-1) aborts the whole process from library
          * code; hypre's error-reporting machinery would be gentler */
         hypre_printf("Error!! Block ordering for non-Galerkin coarse grid is not currently supported\n");
         exit(-1);
      }
   }
   //hypre_MGRParCSRMatrixTruncate(A_h_correction, max_elmts);
   //wall_time = time_getWallclockSeconds() - wall_time;
   //hypre_printf("Filter A_h_correction time: %1.5f\n", wall_time);
   //hypre_ParCSRMatrixPrintIJ(A_h_correction,1,1,"A_h_correction_filtered");
   // coarse grid / schur complement
   hypre_ParCSRMatrixAdd(1.0, A_cc, 1.0, A_h_correction, &A_h);
   *A_h_ptr = A_h;
   //hypre_ParCSRMatrixPrintIJ(A_h,1,1,"A_h");
   hypre_ParCSRMatrixDestroy(A_cc);
   hypre_ParCSRMatrixDestroy(A_h_correction);
   hypre_TFree(c_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(f_marker, HYPRE_MEMORY_HOST);
   return hypre_error_flag;
}

/* Algebraic fixed-stress diagnostic: splits A into displacement (U),
 * stress/saturation (S) and pressure (P) blocks by the index ranges in
 * mgr_idx_array, then prints D_sp = A_su * A_uu^{-1} * A_up * 1 and
 * D_pp = A_pu * A_uu^{-1} * A_up * 1 (A_uu^{-1} applied via A_ff_solver). */
HYPRE_Int
hypre_MGRComputeAlgebraicFixedStress(hypre_ParCSRMatrix *A,
                                     HYPRE_BigInt *mgr_idx_array,
                                     HYPRE_Solver A_ff_solver)
{
   HYPRE_Int *U_marker, *S_marker, *P_marker;
   HYPRE_Int n_fine, i;
   HYPRE_BigInt ibegin;
   hypre_ParCSRMatrix *A_up;
   hypre_ParCSRMatrix *A_uu;
   hypre_ParCSRMatrix *A_su;
   hypre_ParCSRMatrix *A_pu;
   hypre_ParVector *e1_vector;
   hypre_ParVector *e2_vector;
   hypre_ParVector *e3_vector;
   hypre_ParVector *e4_vector;
   hypre_ParVector *e5_vector;
   n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   ibegin = hypre_ParCSRMatrixFirstRowIndex(A);
   hypre_assert(ibegin == mgr_idx_array[0]);
   U_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   S_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   for (i = 0; i < n_fine; i++)
   {
      U_marker[i] = -1;
      S_marker[i] = -1;
      P_marker[i] = -1;
   }
   // create C and F markers
   for (i = 0; i < n_fine; i++)
   {
      if (i < mgr_idx_array[1] - ibegin)
      {
         U_marker[i] = 1;
      }
      else if (i >= (mgr_idx_array[1] - ibegin) && i < (mgr_idx_array[2] - ibegin))
      {
         S_marker[i] = 1;
      }
      else
      {
         P_marker[i] = 1;
      }
   }
   // Get A_up
   hypre_MGRGetSubBlock(A, U_marker, P_marker, 0, &A_up);
   // GetA_uu
   hypre_MGRGetSubBlock(A, U_marker, U_marker, 0, &A_uu);
   // Get A_su
   hypre_MGRGetSubBlock(A, S_marker, U_marker,
0, &A_su); // Get A_pu hypre_MGRGetSubBlock(A, P_marker, U_marker, 0, &A_pu); e1_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_up), hypre_ParCSRMatrixGlobalNumCols(A_up), hypre_ParCSRMatrixColStarts(A_up)); hypre_ParVectorInitialize(e1_vector); hypre_ParVectorSetConstantValues(e1_vector, 1.0); e2_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_uu), hypre_ParCSRMatrixGlobalNumRows(A_uu), hypre_ParCSRMatrixRowStarts(A_uu)); hypre_ParVectorInitialize(e2_vector); hypre_ParVectorSetConstantValues(e2_vector, 0.0); e3_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_uu), hypre_ParCSRMatrixGlobalNumRows(A_uu), hypre_ParCSRMatrixRowStarts(A_uu)); hypre_ParVectorInitialize(e3_vector); hypre_ParVectorSetConstantValues(e3_vector, 0.0); e4_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_su), hypre_ParCSRMatrixGlobalNumRows(A_su), hypre_ParCSRMatrixRowStarts(A_su)); hypre_ParVectorInitialize(e4_vector); hypre_ParVectorSetConstantValues(e4_vector, 0.0); e5_vector = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A_pu), hypre_ParCSRMatrixGlobalNumRows(A_pu), hypre_ParCSRMatrixRowStarts(A_pu)); hypre_ParVectorInitialize(e5_vector); hypre_ParVectorSetConstantValues(e5_vector, 0.0); // compute e2 = A_up * e1 hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_up, e1_vector, 0.0, e2_vector, e2_vector); // solve e3 = A_uu^-1 * e2 hypre_BoomerAMGSolve(A_ff_solver, A_uu, e2_vector, e3_vector); // compute e4 = A_su * e3 hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_su, e3_vector, 0.0, e4_vector, e4_vector); // compute e4 = A_su * e3 hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_su, e3_vector, 0.0, e4_vector, e4_vector); // print e4 hypre_ParVectorPrintIJ(e4_vector, 1, "Dsp"); // compute e5 = A_pu * e3 hypre_ParCSRMatrixMatvecOutOfPlace(1.0, A_pu, e3_vector, 0.0, e5_vector, e5_vector); hypre_ParVectorPrintIJ(e5_vector, 1, "Dpp"); hypre_ParVectorDestroy(e1_vector); hypre_ParVectorDestroy(e2_vector); hypre_ParVectorDestroy(e3_vector); hypre_ParCSRMatrixDestroy(A_uu); 
hypre_ParCSRMatrixDestroy(A_up);
   hypre_ParCSRMatrixDestroy(A_pu);
   hypre_ParCSRMatrixDestroy(A_su);
   hypre_TFree(U_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(S_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   return hypre_error_flag;
}

/* Compute a sparse approximate inverse of A via hypre's Newton-Schulz-
 * Hotelling (NSH) iteration with hard-coded, deliberately tight iteration
 * and fill limits.  The caller owns and must destroy *A_inv. */
HYPRE_Int
hypre_MGRApproximateInverse(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix **A_inv)
{
   HYPRE_Int print_level, mr_max_row_nnz, mr_max_iter, nsh_max_row_nnz, nsh_max_iter, mr_col_version;
   HYPRE_Real mr_tol, nsh_tol;
   HYPRE_Real *droptol = hypre_CTAlloc(HYPRE_Real, 2, HYPRE_MEMORY_HOST);
   hypre_ParCSRMatrix *approx_A_inv = NULL;
   print_level = 0;
   nsh_max_iter = 2;
   nsh_max_row_nnz = 2; // default 1000
   mr_max_iter = 1;
   mr_tol = 1.0e-3;
   mr_max_row_nnz = 2; // default 800
   mr_col_version = 0;
   nsh_tol = 1.0e-3;
   droptol[0] = 1.0e-2;
   droptol[1] = 1.0e-2;
   hypre_ILUParCSRInverseNSH(A, &approx_A_inv, droptol, mr_tol, nsh_tol,
                             DIVIDE_TOL, mr_max_row_nnz, nsh_max_row_nnz,
                             mr_max_iter, nsh_max_iter, mr_col_version,
                             print_level);
   *A_inv = approx_A_inv;
   if (droptol)
   {
      hypre_TFree(droptol, HYPRE_MEMORY_HOST);
   }
   return hypre_error_flag;
}

/* Build the MGR interpolation P = [ -S*A_fc ; I ] where S is a supplied
 * approximation to inv(A_ff): C-point rows get the identity, F-point rows
 * get the negated entries of minus_Wp = S * A_fc. */
HYPRE_Int
hypre_MGRBuildInterpApproximateInverseExp(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix *S,
                                          HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global,
                                          HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_ptr)
{
   HYPRE_Int *C_marker;
   HYPRE_Int *F_marker;
   hypre_ParCSRMatrix *A_fc;
   hypre_ParCSRMatrix *minus_Wp;
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRMatrix *P;
   HYPRE_BigInt *col_map_offd_P;
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real *P_diag_data;
   HYPRE_Int *P_diag_i;
   HYPRE_Int *P_diag_j;
   HYPRE_Real *P_offd_data;
   HYPRE_Int *P_offd_i;
   HYPRE_Int *P_offd_j;
   HYPRE_Int P_diag_size, P_offd_size;
   HYPRE_Int jj_counter, jj_counter_offd;
   HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
   HYPRE_Int *fine_to_coarse = NULL;
   HYPRE_Int coarse_counter;
   HYPRE_BigInt total_global_cpts;
   HYPRE_Int num_cols_P_offd;
   // HYPRE_BigInt my_first_cpt;
   HYPRE_Int i, jj;
   HYPRE_Real one = 1.0;
   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   // HYPRE_Int num_threads;
   // HYPRE_Real wall_time; /* for debugging instrumentation */
   C_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   F_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
   // create C and F markers
   for (i = 0; i < n_fine; i++)
   {
      C_marker[i] = (CF_marker[i] == 1) ? 1 : -1;
      F_marker[i] = (CF_marker[i] == 1) ? -1 : 1;
   }
   // Get A_FC
   hypre_MGRGetSubBlock(A, F_marker, C_marker, 0, &A_fc);
   // compute -Wp
   minus_Wp = hypre_ParMatmul(S, A_fc);
   hypre_CSRMatrix *minus_Wp_diag = hypre_ParCSRMatrixDiag(minus_Wp);
   HYPRE_Real *minus_Wp_diag_data = hypre_CSRMatrixData(minus_Wp_diag);
   HYPRE_Int *minus_Wp_diag_i = hypre_CSRMatrixI(minus_Wp_diag);
   HYPRE_Int *minus_Wp_diag_j = hypre_CSRMatrixJ(minus_Wp_diag);
   hypre_CSRMatrix *minus_Wp_offd = hypre_ParCSRMatrixOffd(minus_Wp);
   HYPRE_Real *minus_Wp_offd_data = hypre_CSRMatrixData(minus_Wp_offd);
   HYPRE_Int *minus_Wp_offd_i = hypre_CSRMatrixI(minus_Wp_offd);
   HYPRE_Int *minus_Wp_offd_j = hypre_CSRMatrixJ(minus_Wp_offd);
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   // num_threads = hypre_NumThreads();
   // my_first_cpt = num_cpts_global[0];
   /* Last rank knows the global C-point count; broadcast it to all */
   if (my_id == (num_procs - 1))
   {
      total_global_cpts = num_cpts_global[1];
   }
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/
   /*-----------------------------------------------------------------------
    * Intialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (i = 0; i < n_fine; i++)
   {
      fine_to_coarse[i] = -1;
   }
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/
   /* row_counter indexes the F-point rows of minus_Wp (its rows correspond
    * to F-points only) while i runs over all fine rows */
   HYPRE_Int row_counter = 0;
   coarse_counter = 0;
   for (i = 0; i < n_fine; i++)
   {
      /*--------------------------------------------------------------------
       * If i is a C-point, interpolation is the identity. Also set up
       * mapping vector.
       *--------------------------------------------------------------------*/
      if (CF_marker[i] > 0)
      {
         jj_counter++;
         fine_to_coarse[i] = coarse_counter;
         coarse_counter++;
      }
      else
      {
         /*--------------------------------------------------------------------
          * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc}
          *--------------------------------------------------------------------*/
         for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter + 1]; jj++)
         {
            jj_counter++;
         }
         if (num_procs > 1)
         {
            for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter + 1]; jj++)
            {
               jj_counter_offd++;
            }
         }
         row_counter++;
      }
   }
   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/
   /* NOTE(review): these arrays are allocated with HYPRE_MEMORY_DEVICE yet
    * filled directly by the host loops below — presumably relies on
    * unified/managed memory; verify against the build configuration. */
   P_diag_size = jj_counter;
   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_DEVICE);
   P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_DEVICE);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_DEVICE);
   P_diag_i[n_fine] = jj_counter;
   P_offd_size = jj_counter_offd;
   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, HYPRE_MEMORY_DEVICE);
   P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_DEVICE);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_DEVICE);
   /*-----------------------------------------------------------------------
    * Intialize some stuff.
    *-----------------------------------------------------------------------*/
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   /*-----------------------------------------------------------------------
    * Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/
   /* Second pass: fill P row by row (note this pass tests CF_marker >= 0
    * where the first pass tested > 0; equivalent only while CF_marker is
    * strictly +/-1 — TODO confirm) */
   row_counter = 0;
   for (i = 0; i < n_fine; i++)
   {
      /*--------------------------------------------------------------------
       * If i is a c-point, interpolation is the identity.
       *--------------------------------------------------------------------*/
      if (CF_marker[i] >= 0)
      {
         P_diag_i[i] = jj_counter;
         P_diag_j[jj_counter] = fine_to_coarse[i];
         P_diag_data[jj_counter] = one;
         jj_counter++;
      }
      /*--------------------------------------------------------------------
       * If i is an F-point, build interpolation.
*--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; for (jj = minus_Wp_diag_i[row_counter]; jj < minus_Wp_diag_i[row_counter + 1]; jj++) { P_diag_j[jj_counter] = minus_Wp_diag_j[jj]; P_diag_data[jj_counter] = - minus_Wp_diag_data[jj]; jj_counter++; } /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; if (num_procs > 1) { for (jj = minus_Wp_offd_i[row_counter]; jj < minus_Wp_offd_i[row_counter + 1]; jj++) { P_offd_j[jj_counter_offd] = minus_Wp_offd_j[jj]; P_offd_data[jj_counter_offd] = - minus_Wp_offd_data[jj]; jj_counter_offd++; } } row_counter++; } P_offd_i[i + 1] = jj_counter_offd; } P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; num_cols_P_offd = hypre_CSRMatrixNumCols(minus_Wp_offd); HYPRE_BigInt *col_map_offd_tmp = hypre_ParCSRMatrixColMapOffd(minus_Wp); if (P_offd_size) { col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); for (i = 0; i < num_cols_P_offd; i++) { col_map_offd_P[i] = col_map_offd_tmp[i]; } } if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_MatvecCommPkgCreate(P); *P_ptr = P; hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(C_marker, HYPRE_MEMORY_HOST); hypre_TFree(F_marker, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDestroy(A_fc); hypre_ParCSRMatrixDestroy(minus_Wp); return 0; } HYPRE_Int hypre_MGRBuildInterpApproximateInverse(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_BigInt *num_cpts_global, HYPRE_Int 
debug_flag, hypre_ParCSRMatrix **P_ptr) { HYPRE_Int *C_marker; HYPRE_Int *F_marker; hypre_ParCSRMatrix *A_ff; hypre_ParCSRMatrix *A_fc; hypre_ParCSRMatrix *A_ff_inv; hypre_ParCSRMatrix *W; MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Int *P_offd_i; HYPRE_Int P_diag_nnz; HYPRE_Int n_fine = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); HYPRE_BigInt total_global_cpts; HYPRE_Int num_cols_P_offd; HYPRE_Int i; HYPRE_Real m_one = -1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_MemoryLocation memory_location_P = hypre_ParCSRMatrixMemoryLocation(A); C_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); F_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); // create C and F markers for (i = 0; i < n_fine; i++) { C_marker[i] = (CF_marker[i] == 1) ? 1 : -1; F_marker[i] = (CF_marker[i] == 1) ? -1 : 1; } // Get A_FF hypre_MGRGetSubBlock(A, F_marker, F_marker, 0, &A_ff); // hypre_ParCSRMatrixPrintIJ(A_ff, 1, 1, "A_ff"); // Get A_FC hypre_MGRGetSubBlock(A, F_marker, C_marker, 0, &A_fc); hypre_MGRApproximateInverse(A_ff, &A_ff_inv); // hypre_ParCSRMatrixPrintIJ(A_ff_inv, 1, 1, "A_ff_inv"); // hypre_ParCSRMatrixPrintIJ(A_fc, 1, 1, "A_fc"); W = hypre_ParMatmul(A_ff_inv, A_fc); hypre_ParCSRMatrixScale(W, m_one); // hypre_ParCSRMatrixPrintIJ(W, 1, 1, "Wp"); hypre_CSRMatrix *W_diag = hypre_ParCSRMatrixDiag(W); hypre_CSRMatrix *W_offd = hypre_ParCSRMatrixOffd(W); hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); if (my_id == (num_procs - 1)) { total_global_cpts = num_cpts_global[1]; } hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm); /*----------------------------------------------------------------------- * Allocate arrays. 
*-----------------------------------------------------------------------*/ P_diag_nnz = hypre_CSRMatrixNumNonzeros(W_diag) + hypre_CSRMatrixNumCols(W_diag); P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_nnz, memory_location_P); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_nnz, memory_location_P); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine + 1, memory_location_P); /* Extend W data to P data */ hypre_ExtendWtoPHost( n_fine, CF_marker, hypre_CSRMatrixI(W_diag), hypre_CSRMatrixJ(W_diag), hypre_CSRMatrixData(W_diag), P_diag_i, P_diag_j, P_diag_data, hypre_CSRMatrixI(W_offd), P_offd_i ); // final P P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, hypre_CSRMatrixNumCols(W_offd), P_diag_nnz, hypre_CSRMatrixNumNonzeros(W_offd) ); hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(P)) = memory_location_P; hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(P)) = memory_location_P; hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(P)) = P_diag_i; hypre_CSRMatrixJ(hypre_ParCSRMatrixDiag(P)) = P_diag_j; hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(P)) = P_diag_data; hypre_CSRMatrixI(hypre_ParCSRMatrixOffd(P)) = P_offd_i; hypre_CSRMatrixJ(hypre_ParCSRMatrixOffd(P)) = hypre_CSRMatrixJ(W_offd); hypre_CSRMatrixData(hypre_ParCSRMatrixOffd(P)) = hypre_CSRMatrixData(W_offd); hypre_CSRMatrixJ(W_offd) = NULL; hypre_CSRMatrixData(W_offd) = NULL; num_cols_P_offd = hypre_CSRMatrixNumCols(W_offd); HYPRE_BigInt *col_map_offd_tmp = hypre_ParCSRMatrixColMapOffd(W); if (hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(P))) { col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); for (i = 0; i < num_cols_P_offd; i++) { col_map_offd_P[i] = col_map_offd_tmp[i]; } } if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(P)) = num_cols_P_offd; } 
hypre_MatvecCommPkgCreate(P);  /* build the communication package for the new P */

   *P_ptr = P;

   /* release work objects */
   hypre_TFree(C_marker, HYPRE_MEMORY_HOST);
   hypre_TFree(F_marker, HYPRE_MEMORY_HOST);
   hypre_ParCSRMatrixDestroy(A_ff);
   hypre_ParCSRMatrixDestroy(A_fc);
   hypre_ParCSRMatrixDestroy(A_ff_inv);
   hypre_ParCSRMatrixDestroy(W);

   return 0;
}

/* Setup interpolation operator */
/* Dispatch on interp_type:
 *   < 3   : hypre_MGRBuildPHost (or hypre_MGRBuildPDevice under CUDA);
 *   == 4  : approximate-inverse interpolation (host only) + truncation;
 *   5/6/7 : BoomerAMG modified extended interpolation variants;
 *   else  : classical modified interpolation.
 * num_functions, dof_func and numsweeps are currently unused in the live
 * code paths (numsweeps appears only in the commented-out Jacobi sweeps). */
HYPRE_Int
hypre_MGRBuildInterp(hypre_ParCSRMatrix *A,
                     HYPRE_Int *CF_marker,
                     hypre_ParCSRMatrix *S,
                     HYPRE_BigInt *num_cpts_global,
                     HYPRE_Int num_functions,
                     HYPRE_Int *dof_func,
                     HYPRE_Int debug_flag,
                     HYPRE_Real trunc_factor,
                     HYPRE_Int max_elmts,
                     hypre_ParCSRMatrix **P,
                     HYPRE_Int interp_type,
                     HYPRE_Int numsweeps)
{
   // HYPRE_Int i;
   hypre_ParCSRMatrix *P_ptr = NULL;
   //HYPRE_Real jac_trunc_threshold = trunc_factor;
   //HYPRE_Real jac_trunc_threshold_minus = 0.5*jac_trunc_threshold;
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );

   /* Interpolation for each level */
   if (interp_type < 3)
   {
      if (exec == HYPRE_EXEC_HOST)
      {
         // hypre_MGRBuildP(A, CF_marker, num_cpts_global, interp_type, debug_flag, &P_ptr);
         hypre_MGRBuildPHost(A, CF_marker, num_cpts_global, interp_type, &P_ptr);
         //hypre_ParCSRMatrixPrintIJ(P_ptr, 0, 0, "P_host");
      }
#if defined(HYPRE_USING_CUDA)
      else
      {
         hypre_MGRBuildPDevice(A, CF_marker, num_cpts_global, interp_type, &P_ptr);
         //hypre_ParCSRMatrixPrintIJ(P_ptr, 0, 0, "P_device");
      }
#endif
      /* Could do a few sweeps of Jacobi to further improve Jacobi interpolation P */
      /*
      if(interp_type == 2)
      {
         for(i=0; i<numsweeps; i++)
         {
            hypre_BoomerAMGJacobiInterp(A, &P_ptr, S,1, NULL, CF_marker, 0, jac_trunc_threshold, jac_trunc_threshold_minus );
         }
         hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts);
      }
      */
   }
   else if (interp_type == 4)
   {
      if (exec == HYPRE_EXEC_HOST)
      {
         hypre_MGRBuildInterpApproximateInverse(A, CF_marker, num_cpts_global, debug_flag, &P_ptr);
         hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts);
      }
#if defined(HYPRE_USING_CUDA)
      else
      {
         hypre_NoGPUSupport("interpolation");
      }
#endif
   }
   /*
   else if (interp_type == 99)
   {
      if (exec == HYPRE_EXEC_HOST)
      {
         hypre_MGRBuildInterpApproximateInverseExp(A, S, CF_marker, num_cpts_global, debug_flag, &P_ptr);
         hypre_BoomerAMGInterpTruncation(P_ptr, trunc_factor, max_elmts);
      }
   #if defined(HYPRE_USING_CUDA)
      else
      {
         hypre_NoGPUSupport("interpolation");
      }
   #endif
   }
   */
   else if (interp_type == 5)
   {
      hypre_BoomerAMGBuildModExtInterp(A, CF_marker, S, num_cpts_global, 1, NULL, debug_flag, trunc_factor, max_elmts, &P_ptr);
   }
   else if (interp_type == 6)
   {
      hypre_BoomerAMGBuildModExtPIInterp(A, CF_marker, S, num_cpts_global, 1, NULL, debug_flag, trunc_factor, max_elmts, &P_ptr);
   }
   else if (interp_type == 7)
   {
      hypre_BoomerAMGBuildModExtPEInterp(A, CF_marker, S, num_cpts_global, 1, NULL, debug_flag, trunc_factor, max_elmts, &P_ptr);
   }
   else
   {
      /* Classical modified interpolation */
      hypre_BoomerAMGBuildInterp(A, CF_marker, S, num_cpts_global, 1, NULL, debug_flag, trunc_factor, max_elmts, &P_ptr);
   }

   /* set pointer to P */
   *P = P_ptr;

   return hypre_error_flag;
}

/* Setup restriction operator */
/* Dispatch on restrict_type:
 *   0   : build from A directly;
 *   1/2 : build from A^T;
 *   3   : approximate-inverse build on A^T (diagonal moved first) + truncation;
 *   else: strength matrix of A^T + classical modified interpolation. */
HYPRE_Int
hypre_MGRBuildRestrict(hypre_ParCSRMatrix *A,
                       HYPRE_Int *CF_marker,
                       HYPRE_BigInt *num_cpts_global,
                       HYPRE_Int num_functions,
                       HYPRE_Int *dof_func,
                       HYPRE_Int debug_flag,
                       HYPRE_Real trunc_factor,
                       HYPRE_Int max_elmts,
                       HYPRE_Real strong_threshold,
                       HYPRE_Real max_row_sum,
                       hypre_ParCSRMatrix **R,
                       HYPRE_Int restrict_type,
                       HYPRE_Int numsweeps)
{
   // HYPRE_Int i;
   hypre_ParCSRMatrix *R_ptr = NULL;
   hypre_ParCSRMatrix *AT = NULL;
   hypre_ParCSRMatrix *ST = NULL;
   // HYPRE_Real jac_trunc_threshold = trunc_factor;
   // HYPRE_Real jac_trunc_threshold_minus = 0.5*jac_trunc_threshold;
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );

   /* Build AT (transpose A) */
   if (restrict_type > 0)
   {
      hypre_ParCSRMatrixTranspose(A, &AT, 1);
   }

   /* Restriction for each level */
   if (restrict_type == 0)
   {
      if (exec == HYPRE_EXEC_HOST)
      {
         hypre_MGRBuildP(A, CF_marker, num_cpts_global, restrict_type, debug_flag, &R_ptr);
         //hypre_ParCSRMatrixPrintIJ(R_ptr, 0, 0, "R_host");
      }
#if defined(HYPRE_USING_CUDA)
      else
      {
         hypre_MGRBuildPDevice(A, CF_marker, num_cpts_global, restrict_type, &R_ptr);
         //hypre_ParCSRMatrixPrintIJ(R_ptr, 0, 0, "R_device");
      }
#endif
   }
   else if (restrict_type == 1 || restrict_type == 2)
   {
      if (exec == HYPRE_EXEC_HOST)
      {
         hypre_MGRBuildP(AT, CF_marker, num_cpts_global, restrict_type, debug_flag, &R_ptr);
         //hypre_ParCSRMatrixPrintIJ(R_ptr, 0, 0, "R_host");
      }
#if defined(HYPRE_USING_CUDA)
      else
      {
         hypre_MGRBuildPDevice(AT, CF_marker, num_cpts_global, restrict_type, &R_ptr);
         //hypre_ParCSRMatrixPrintIJ(R_ptr, 0, 0, "R_device");
      }
#endif
   }
   else if (restrict_type == 3)
   {
      /* move diagonal to first entry */
      hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(AT));
      hypre_MGRBuildInterpApproximateInverse(AT, CF_marker, num_cpts_global, debug_flag, &R_ptr);
      hypre_BoomerAMGInterpTruncation(R_ptr, trunc_factor, max_elmts);
   }
   else
   {
      /* Build new strength matrix */
      hypre_BoomerAMGCreateS(AT, strong_threshold, max_row_sum, 1, NULL, &ST);
      /* Classical modified interpolation */
      hypre_BoomerAMGBuildInterp(AT, CF_marker, ST, num_cpts_global, 1, NULL, debug_flag, trunc_factor, max_elmts, &R_ptr);
   }

   /* set pointer to P */
   *R = R_ptr;

   /* Free memory */
   if (restrict_type > 0)
   {
      hypre_ParCSRMatrixDestroy(AT);
   }
   /* FIXME(review): ST is created in the final else-branch (any
      restrict_type other than 0,1,2,3) but only destroyed when
      restrict_type > 5, so e.g. restrict_type == 4 or 5 leaks ST. */
   if (restrict_type > 5)
   {
      hypre_ParCSRMatrixDestroy(ST);
   }

   return hypre_error_flag;
}

/* Invert a dense 4x4 matrix in place (a holds 16 row-major entries) via the
 * adjugate/determinant formula. No singularity check is performed in the
 * live code (the guard below is commented out): det == 0 yields inf/nan. */
void hypre_blas_smat_inv_n4 (HYPRE_Real *a)
{
   const HYPRE_Real a11 = a[0], a12 = a[1], a13 = a[2], a14 = a[3];
   const HYPRE_Real a21 = a[4], a22 = a[5], a23 = a[6], a24 = a[7];
   const HYPRE_Real a31 = a[8], a32 = a[9], a33 = a[10], a34 = a[11];
   const HYPRE_Real a41 = a[12], a42 = a[13], a43 = a[14], a44 = a[15];

   /* cofactor terms of the adjugate */
   const HYPRE_Real M11 = a22 * a33 * a44 + a23 * a34 * a42 + a24 * a32 * a43 - a22 * a34 * a43 - a23 * a32 * a44 - a24 * a33 * a42;
   const HYPRE_Real M12 = a12 * a34 * a43 + a13 * a32 * a44 + a14 * a33 * a42 - a12 * a33 * a44 - a13 * a34 * a42 - a14 * a32 * a43;
   const HYPRE_Real M13 = a12 * a23 * a44 + a13 * a24 * a42 + a14 * a22 * a43 - a12 * a24 * a43 - a13 * a22 * a44 - a14 * a23 * a42;
   const HYPRE_Real M14 = a12 * a24 * a33 + a13 * a22 * a34 + a14 * a23 * a32 - a12 * a23 * a34 - a13 * a24 * a32 - a14 * a22 * a33;
   const HYPRE_Real M21 = a21 * a34 * a43 + a23 * a31 * a44 + a24 * a33 * a41 - a21 * a33 * a44 - a23 * a34 * a41 - a24 * a31 * a43;
   const HYPRE_Real M22 = a11 * a33 * a44 + a13 * a34 * a41 + a14 * a31 * a43 - a11 * a34 * a43 - a13 * a31 * a44 - a14 * a33 * a41;
   const HYPRE_Real M23 = a11 * a24 * a43 + a13 * a21 * a44 + a14 * a23 * a41 - a11 * a23 * a44 - a13 * a24 * a41 - a14 * a21 * a43;
   const HYPRE_Real M24 = a11 * a23 * a34 + a13 * a24 * a31 + a14 * a21 * a33 - a11 * a24 * a33 - a13 * a21 * a34 - a14 * a23 * a31;
   const HYPRE_Real M31 = a21 * a32 * a44 + a22 * a34 * a41 + a24 * a31 * a42 - a21 * a34 * a42 - a22 * a31 * a44 - a24 * a32 * a41;
   const HYPRE_Real M32 = a11 * a34 * a42 + a12 * a31 * a44 + a14 * a32 * a41 - a11 * a32 * a44 - a12 * a34 * a41 - a14 * a31 * a42;
   const HYPRE_Real M33 = a11 * a22 * a44 + a12 * a24 * a41 + a14 * a21 * a42 - a11 * a24 * a42 - a12 * a21 * a44 - a14 * a22 * a41;
   const HYPRE_Real M34 = a11 * a24 * a32 + a12 * a21 * a34 + a14 * a22 * a31 - a11 * a22 * a34 - a12 * a24 * a31 - a14 * a21 * a32;
   const HYPRE_Real M41 = a21 * a33 * a42 + a22 * a31 * a43 + a23 * a32 * a41 - a21 * a32 * a43 - a22 * a33 * a41 - a23 * a31 * a42;
   const HYPRE_Real M42 = a11 * a32 * a43 + a12 * a33 * a41 + a13 * a31 * a42 - a11 * a33 * a42 - a12 * a31 * a43 - a13 * a32 * a41;
   const HYPRE_Real M43 = a11 * a23 * a42 + a12 * a21 * a43 + a13 * a22 * a41 - a11 * a22 * a43 - a12 * a23 * a41 - a13 * a21 * a42;
   const HYPRE_Real M44 = a11 * a22 * a33 + a12 * a23 * a31 + a13 * a21 * a32 - a11 * a23 * a32 - a12 * a21 * a33 - a13 * a22 * a31;

   const HYPRE_Real det = a11 * M11 + a12 * M21 + a13 * M31 + a14 * M41;
   HYPRE_Real det_inv;

   //if ( fabs(det) < 1e-22 ) {
   //hypre_printf("### WARNING: Matrix is nearly singular! det = %e\n", det);
   /*
   printf("##----------------------------------------------\n");
   printf("## %12.5e %12.5e %12.5e \n", a0, a1, a2);
   printf("## %12.5e %12.5e %12.5e \n", a3, a4, a5);
   printf("## %12.5e %12.5e %12.5e \n", a5, a6, a7);
   printf("##----------------------------------------------\n");
   getchar();
   */
   //}

   det_inv = 1.0 / det;

   a[0] = M11 * det_inv;
   a[1] = M12 * det_inv;
   a[2] = M13 * det_inv;
   a[3] = M14 * det_inv;
   a[4] = M21 * det_inv;
   a[5] = M22 * det_inv;
   a[6] = M23 * det_inv;
   a[7] = M24 * det_inv;
   a[8] = M31 * det_inv;
   a[9] = M32 * det_inv;
   a[10] = M33 * det_inv;
   a[11] = M34 * det_inv;
   a[12] = M41 * det_inv;
   a[13] = M42 * det_inv;
   a[14] = M43 * det_inv;
   a[15] = M44 * det_inv;
}

/* Invert a dense n x n matrix in place by Gauss-Jordan elimination WITHOUT
 * pivoting (the diagonal is used as-is; the zero-diagonal guard is commented
 * out). n == 4 is special-cased to the closed-form inverse above. */
void hypre_blas_mat_inv(HYPRE_Real *a, HYPRE_Int n)
{
   HYPRE_Int i, j, k, l, u, kn, in;
   HYPRE_Real alinv;

   if (n == 4)
   {
      hypre_blas_smat_inv_n4(a);
   }
   else
   {
      for (k = 0; k < n; ++k)
      {
         kn = k * n;
         l = kn + k;

         //if (fabs(a[l]) < SMALLREAL) {
         //   printf("### WARNING: Diagonal entry is close to zero!");
         //   printf("### WARNING: diag_%d=%e\n", k, a[l]);
         //   a[l] = SMALLREAL;
         //}
         alinv = 1.0 / a[l];
         a[l] = alinv;

         /* scale pivot row (skipping the pivot column) */
         for (j = 0; j < k; ++j)
         {
            u = kn + j;
            a[u] *= alinv;
         }
         for (j = k + 1; j < n; ++j)
         {
            u = kn + j;
            a[u] *= alinv;
         }

         /* eliminate the pivot column from all other rows */
         for (i = 0; i < k; ++i)
         {
            in = i * n;
            for (j = 0; j < n; ++j)
               if (j != k)
               {
                  u = in + j;
                  a[u] -= a[in + k] * a[kn + j];
               } // end if (j!=k)
         }

         for (i = k + 1; i < n; ++i)
         {
            in = i * n;
            for (j = 0; j < n; ++j)
               if (j != k)
               {
                  u = in + j;
                  a[u] -= a[in + k] * a[kn + j];
               } // end if (j!=k)
         }

         for (i = 0; i < k; ++i)
         {
            u = i * n + k;
            a[u] *= -alinv;
         }
         for (i = k + 1; i < n; ++i)
         {
            u = i * n + k;
            a[u] *= -alinv;
         }
      } // end for (k=0; k<n; ++k)
   }// end if
}

/* Build B, a block-Jacobi scaling matrix for A: the diagonal part of B holds
 * the inverses of the blk_size x blk_size diagonal blocks of A's diag part.
 * Also refreshes mgr_data->diaginv via hypre_blockRelax_setup. */
HYPRE_Int
hypre_block_jacobi_scaling(hypre_ParCSRMatrix *A,
                           hypre_ParCSRMatrix **B_ptr,
                           void *mgr_vdata,
                           HYPRE_Int debug_flag)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;

   HYPRE_Int num_procs, my_id;

   HYPRE_Int blk_size = (mgr_data -> block_size);
   HYPRE_Int reserved_coarse_size = (mgr_data -> reserved_coarse_size);

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_ParCSRMatrix *B;
   hypre_CSRMatrix *B_diag;
   HYPRE_Real *B_diag_data;
   HYPRE_Int *B_diag_i;
   HYPRE_Int *B_diag_j;
   hypre_CSRMatrix *B_offd;

   HYPRE_Int i, ii;
   HYPRE_Int j, jj;
   HYPRE_Int k;

   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int n_block, left_size, inv_size;
   // HYPRE_Real wall_time; /* for debugging instrumentation */
   HYPRE_Int bidx, bidxm1, bidxp1;
   HYPRE_Real * diaginv;

   const HYPRE_Int nb2 = blk_size * blk_size;

   HYPRE_Int block_scaling_error = 0;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   // HYPRE_Int num_threads = hypre_NumThreads();

   //printf("n = %d\n",n);
   /* FIXME(review): my_id == num_procs can never hold (ranks run
      0..num_procs-1), so the reserved_coarse_size branch is dead code;
      presumably my_id == num_procs - 1 was intended — confirm. */
   if (my_id == num_procs)
   {
      n_block = (n - reserved_coarse_size) / blk_size;
      left_size = n - blk_size * n_block;
   }
   else
   {
      n_block = n / blk_size;
      left_size = n - blk_size * n_block;
   }

   inv_size = nb2 * n_block + left_size * left_size;

   //printf("inv_size = %d\n",inv_size);
   hypre_blockRelax_setup(A, blk_size, reserved_coarse_size, &(mgr_data -> diaginv));

   // if (debug_flag==4) wall_time = time_getWallclockSeconds();

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of B and fill in
    *-----------------------------------------------------------------------*/

   B_diag_i = hypre_CTAlloc(HYPRE_Int, n + 1, HYPRE_MEMORY_HOST);
   B_diag_j = hypre_CTAlloc(HYPRE_Int, inv_size, HYPRE_MEMORY_HOST);
   B_diag_data = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);

   B_diag_i[n] = inv_size;

   //B_offd_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST);
   //B_offd_j = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST);
   //B_offd_data = hypre_CTAlloc(HYPRE_Real, 1, HYPRE_MEMORY_HOST);
   //B_offd_i[n] = 1;

   /*-----------------------------------------------------------------
    * Get all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   /* NOTE(review): this nb2-sized scratch buffer is never freed before
      return (small leak). Also rows past n_block*blk_size get no B_diag_i
      entries here — confirm callers always have left_size == 0. */
   diaginv = hypre_CTAlloc(HYPRE_Real, nb2, HYPRE_MEMORY_HOST);
   //printf("n_block = %d\n",n_block);
   for (i = 0; i < n_block; i++)
   {
      bidxm1 = i * blk_size;
      bidxp1 = (i + 1) * blk_size;

      /* gather block i of A's diagonal into the dense scratch buffer */
      for (k = 0; k < blk_size; k++)
      {
         for (j = 0; j < blk_size; j++)
         {
            bidx = k * blk_size + j;
            diaginv[bidx] = 0.0;
         }

         for (ii = A_diag_i[bidxm1 + k]; ii < A_diag_i[bidxm1 + k + 1]; ii++)
         {
            jj = A_diag_j[ii];
            if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL)
            {
               bidx = k * blk_size + jj - bidxm1;
               //printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx);
               diaginv[bidx] = A_diag_data[ii];
            }
         }
      }

      /* for (k = 0;k < blk_size; k++) */
      /* { */
      /*    for (j = 0;j < blk_size; j++) */
      /*    { */
      /*       bidx = k*blk_size + j; */
      /*       printf("diaginv[%d] = %e\n",bidx,diaginv[bidx]); */
      /*    } */
      /* } */

      hypre_blas_mat_inv(diaginv, blk_size);

      /* scatter the inverted block into B's CSR arrays */
      for (k = 0; k < blk_size; k++)
      {
         B_diag_i[i * blk_size + k] = i * nb2 + k * blk_size;
         //B_offd_i[i*nb2+k] = 0;

         for (j = 0; j < blk_size; j++)
         {
            bidx = i * nb2 + k * blk_size + j;
            B_diag_j[bidx] = i * blk_size + j;
            B_diag_data[bidx] = diaginv[k * blk_size + j];
         }
      }
   }

   //printf("Before create\n");
   B = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), hypre_ParCSRMatrixGlobalNumCols(A), hypre_ParCSRMatrixRowStarts(A), hypre_ParCSRMatrixColStarts(A), 0, inv_size, 0);
   //printf("After create\n");

   B_diag = hypre_ParCSRMatrixDiag(B);
   hypre_CSRMatrixData(B_diag) = B_diag_data;
   hypre_CSRMatrixI(B_diag) = B_diag_i;
   hypre_CSRMatrixJ(B_diag) = B_diag_j;

   B_offd = hypre_ParCSRMatrixOffd(B);
   hypre_CSRMatrixData(B_offd) = NULL;
   hypre_CSRMatrixI(B_offd) = NULL;
   hypre_CSRMatrixJ(B_offd) = NULL;

   *B_ptr = B;

   return (block_scaling_error);
}

/* One sweep of block relaxation: for each blk_size x blk_size diagonal
 * block, form the block residual and update u += diaginv * res.
 * method: 0 -> Jacobi on the diagonal part (uses the Vtemp copy of u);
 *         1 -> Gauss-Seidel on the diagonal part (uses current u);
 *         otherwise Jacobi. The off-diagonal part always uses the
 * communicated external values (Jacobi-style).
 * NOTE(review): blk_size is declared HYPRE_Real but used as a count
 * everywhere (allocation sizes, loop bounds) — presumably should be
 * HYPRE_Int; left_size is accepted but unused, so leftover rows are
 * not relaxed here — confirm. */
HYPRE_Int
hypre_blockRelax_solve (hypre_ParCSRMatrix *A,
                        hypre_ParVector *f,
                        hypre_ParVector *u,
                        HYPRE_Real blk_size,
                        HYPRE_Int n_block,
                        HYPRE_Int left_size,
                        HYPRE_Int method,
                        HYPRE_Real *diaginv,
                        hypre_ParVector *Vtemp)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;

   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
   HYPRE_Real *u_data = hypre_VectorData(u_local);

   hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
   HYPRE_Real *f_data = hypre_VectorData(f_local);

   hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
   HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local);
   HYPRE_Real *Vext_data = NULL;
   HYPRE_Real *v_buf_data;

   HYPRE_Int i, j, k;
   HYPRE_Int ii, jj;
   HYPRE_Int bidx, bidx1;
   HYPRE_Int relax_error = 0;
   HYPRE_Int num_sends;
   HYPRE_Int index, start;
   HYPRE_Int num_procs, my_id;

   HYPRE_Real *res;

   const HYPRE_Int nb2 = blk_size * blk_size;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   // HYPRE_Int num_threads = hypre_NumThreads();

   res = hypre_CTAlloc(HYPRE_Real, blk_size, HYPRE_MEMORY_HOST);

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   if (num_procs > 1)
   {
      /* exchange boundary values of u with neighboring ranks */
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);

      v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);

      Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);

      if (num_cols_offd)
      {
         A_offd_j = hypre_CSRMatrixJ(A_offd);
         A_offd_data = hypre_CSRMatrixData(A_offd);
      }

      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
            v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
      }

      comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data);
   }

   /*-----------------------------------------------------------------
    * Copy current approximation into temporary vector.
    *-----------------------------------------------------------------*/

#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (i = 0; i < n; i++)
   {
      Vtemp_data[i] = u_data[i];
      //printf("u_old[%d] = %e\n",i,Vtemp_data[i]);
   }

   if (num_procs > 1)
   {
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
   }

   /*-----------------------------------------------------------------
    * Relax points block by block
    *-----------------------------------------------------------------*/
   for (i = 0; i < n_block; i++)
   {
      /* res = f - A*u restricted to the rows of block i */
      for (j = 0; j < blk_size; j++)
      {
         bidx = i * blk_size + j;
         res[j] = f_data[bidx];

         for (jj = A_diag_i[bidx]; jj < A_diag_i[bidx + 1]; jj++)
         {
            ii = A_diag_j[jj];
            if (method == 0)
            {
               // Jacobi for diagonal part
               res[j] -= A_diag_data[jj] * Vtemp_data[ii];
            }
            else if (method == 1)
            {
               // Gauss-Seidel for diagonal part
               res[j] -= A_diag_data[jj] * u_data[ii];
            }
            else
            {
               // Default do Jacobi for diagonal part
               res[j] -= A_diag_data[jj] * Vtemp_data[ii];
            }
            //printf("%d: Au= %e * %e =%e\n",ii,A_diag_data[jj],Vtemp_data[ii], res[j]);
         }

         for (jj = A_offd_i[bidx]; jj < A_offd_i[bidx + 1]; jj++)
         {
            // always do Jacobi for off-diagonal part
            ii = A_offd_j[jj];
            res[j] -= A_offd_data[jj] * Vext_data[ii];
         }
         //printf("%d: res = %e\n",bidx,res[j]);
      }

      /* u_block += inv(diag block) * res */
      for (j = 0; j < blk_size; j++)
      {
         bidx1 = i * blk_size + j;

         for (k = 0; k < blk_size; k++)
         {
            bidx = i * nb2 + j * blk_size + k;
            u_data[bidx1] += res[k] * diaginv[bidx];
            //printf("u[%d] = %e, diaginv[%d] = %e\n",bidx1,u_data[bidx1],bidx,diaginv[bidx]);
         }
         //printf("u[%d] = %e\n",bidx1,u_data[bidx1]);
      }
   }

   if (num_procs > 1)
   {
      hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);  /* free comm buffers (num_procs > 1 path) */
   }

   hypre_TFree(res, HYPRE_MEMORY_HOST);

   return (relax_error);
}

/* One sweep of block Gauss-Seidel: same structure as hypre_blockRelax_solve
 * but the diagonal-part residual always uses the current (already updated)
 * u values. Off-diagonal contributions use the communicated Vext_data.
 * NOTE(review): blk_size is declared HYPRE_Real but used as a count, and
 * left_size is unused — same concerns as hypre_blockRelax_solve. */
HYPRE_Int
hypre_block_gs (hypre_ParCSRMatrix *A,
                hypre_ParVector *f,
                hypre_ParVector *u,
                HYPRE_Real blk_size,
                HYPRE_Int n_block,
                HYPRE_Int left_size,
                HYPRE_Real *diaginv,
                hypre_ParVector *Vtemp)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;

   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);

   hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
   HYPRE_Real *u_data = hypre_VectorData(u_local);

   hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
   HYPRE_Real *f_data = hypre_VectorData(f_local);

   hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
   HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local);
   HYPRE_Real *Vext_data = NULL;
   HYPRE_Real *v_buf_data;

   HYPRE_Int i, j, k;
   HYPRE_Int ii, jj;
   HYPRE_Int bidx, bidx1;
   HYPRE_Int relax_error = 0;
   HYPRE_Int num_sends;
   HYPRE_Int index, start;
   HYPRE_Int num_procs, my_id;

   HYPRE_Real *res;

   const HYPRE_Int nb2 = blk_size * blk_size;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   //HYPRE_Int num_threads = hypre_NumThreads();

   res = hypre_CTAlloc(HYPRE_Real, blk_size, HYPRE_MEMORY_HOST);

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   if (num_procs > 1)
   {
      /* exchange boundary values of u with neighboring ranks */
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);

      v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);

      Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST);

      if (num_cols_offd)
      {
         A_offd_j = hypre_CSRMatrixJ(A_offd);
         A_offd_data = hypre_CSRMatrixData(A_offd);
      }

      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
            v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
      }

      comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data);
   }

   /*-----------------------------------------------------------------
    * Copy current approximation into temporary vector.
    *-----------------------------------------------------------------*/

#if 0
#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (i = 0; i < n; i++)
   {
      Vtemp_data[i] = u_data[i];
      //printf("u_old[%d] = %e\n",i,Vtemp_data[i]);
   }

   if (num_procs > 1)
   {
      hypre_ParCSRCommHandleDestroy(comm_handle);
      comm_handle = NULL;
   }

   /*-----------------------------------------------------------------
    * Relax points block by block
    *-----------------------------------------------------------------*/
   for (i = 0; i < n_block; i++)
   {
      /* res = f - A*u restricted to the rows of block i (Gauss-Seidel:
         uses current u, so earlier block updates are visible) */
      for (j = 0; j < blk_size; j++)
      {
         bidx = i * blk_size + j;
         res[j] = f_data[bidx];

         for (jj = A_diag_i[bidx]; jj < A_diag_i[bidx + 1]; jj++)
         {
            ii = A_diag_j[jj];
            //res[j] -= A_diag_data[jj] * Vtemp_data[ii];
            //printf("my_id = %d, %d: Au = %e * %e\n",my_id,ii,A_diag_data[jj],Vtemp_data[ii]);
            res[j] -= A_diag_data[jj] * u_data[ii];
            //printf("%d: Au= %e * %e =%e\n",ii,A_diag_data[jj],Vtemp_data[ii], res[j]);
         }

         for (jj = A_offd_i[bidx]; jj < A_offd_i[bidx + 1]; jj++)
         {
            ii = A_offd_j[jj];
            res[j] -= A_offd_data[jj] * Vext_data[ii];
         }
         //printf("%d: res = %e\n",bidx,res[j]);
      }

      /* u_block += inv(diag block) * res */
      for (j = 0; j < blk_size; j++)
      {
         bidx1 = i * blk_size + j;

         for (k = 0; k < blk_size; k++)
         {
            bidx = i * nb2 + j * blk_size + k;
            u_data[bidx1] += res[k] * diaginv[bidx];
            //printf("u[%d] = %e, diaginv[%d] = %e\n",bidx1,u_data[bidx1],bidx,diaginv[bidx]);
         }
         //printf("u[%d] = %e\n",bidx1,u_data[bidx1]);
      }
   }

   if (num_procs > 1)
   {
      hypre_TFree(Vext_data, HYPRE_MEMORY_HOST);
      hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(res, HYPRE_MEMORY_HOST);

   return (relax_error);
}

/*Block smoother*/
/* Extract the blk_size x blk_size diagonal blocks of A's diag part (plus a
 * trailing left_size x left_size block for the leftover rows) into
 * *diaginvptr and invert them in place. Any previous buffer in *diaginvptr
 * is freed and reallocated. Always returns 1. */
HYPRE_Int
hypre_blockRelax_setup(hypre_ParCSRMatrix *A,
                       HYPRE_Int blk_size,
                       HYPRE_Int reserved_coarse_size,
                       HYPRE_Real **diaginvptr)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);

   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);

   HYPRE_Int i, j, k;
   HYPRE_Int ii, jj;
   HYPRE_Int bidx, bidxm1, bidxp1;
   HYPRE_Int num_procs, my_id;

   const HYPRE_Int nb2 = blk_size * blk_size;
   HYPRE_Int n_block;
   HYPRE_Int left_size, inv_size;
   HYPRE_Real *diaginv = *diaginvptr;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   //HYPRE_Int num_threads = hypre_NumThreads();

   /* FIXME(review): my_id == num_procs is never true (ranks run
      0..num_procs-1), so the reserved_coarse_size branch is dead code;
      presumably my_id == num_procs - 1 was intended — confirm. */
   if (my_id == num_procs)
   {
      n_block = (n - reserved_coarse_size) / blk_size;
      left_size = n - blk_size * n_block;
   }
   else
   {
      n_block = n / blk_size;
      left_size = n - blk_size * n_block;
   }

   inv_size = nb2 * n_block + left_size * left_size;

   if (diaginv != NULL)
   {
      hypre_TFree(diaginv, HYPRE_MEMORY_HOST);
      diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);
   }
   else
   {
      diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);
   }

   /*-----------------------------------------------------------------
    * Get all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   for (i = 0; i < n_block; i++)
   {
      bidxm1 = i * blk_size;
      bidxp1 = (i + 1) * blk_size;
      //printf("bidxm1 = %d,bidxp1 = %d\n",bidxm1,bidxp1);

      for (k = 0; k < blk_size; k++)
      {
         for (j = 0; j < blk_size; j++)
         {
            bidx = i * nb2 + k * blk_size + j;
            diaginv[bidx] = 0.0;
         }

         for (ii = A_diag_i[bidxm1 + k]; ii < A_diag_i[bidxm1 + k + 1]; ii++)
         {
            jj = A_diag_j[ii];
            if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL)
            {
               bidx = i * nb2 + k * blk_size + jj - bidxm1;
               //printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx);
               diaginv[bidx] = A_diag_data[ii];
            }
         }
      }
   }

   /* FIXME(review): the trailing left_size x left_size block looks broken
      for left_size > 0: entries are indexed with stride blk_size
      (i * blk_size + j) rather than left_size; the column test
      jj > n_block * blk_size drops the first leftover column (likely
      meant >=); and the inverse below is taken at offset
      (HYPRE_Int)(blk_size * nb2), which disagrees with the n_block * nb2
      storage offset used here. Confirm callers always have n divisible
      by blk_size. */
   for (i = 0; i < left_size; i++)
   {
      bidxm1 = n_block * nb2 + i * blk_size;
      bidxp1 = n_block * nb2 + (i + 1) * blk_size;
      for (j = 0; j < left_size; j++)
      {
         bidx = n_block * nb2 + i * blk_size + j;
         diaginv[bidx] = 0.0;
      }

      for (ii = A_diag_i[n_block * blk_size + i]; ii < A_diag_i[n_block * blk_size + i + 1]; ii++)
      {
         jj = A_diag_j[ii];
         if (jj > n_block * blk_size)
         {
            bidx = n_block * nb2 + i * blk_size + jj - n_block * blk_size;
            diaginv[bidx] = A_diag_data[ii];
         }
      }
   }

   /*-----------------------------------------------------------------
    * compute the inverses of all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   if (blk_size > 1)
   {
      for (i = 0; i < n_block; i++)
      {
         hypre_blas_mat_inv(diaginv + i * nb2, blk_size);
      }
      hypre_blas_mat_inv(diaginv + (HYPRE_Int)(blk_size * nb2), left_size);
   }
   else
   {
      /* scalar case: plain reciprocal with a zero guard */
      for (i = 0; i < n; i++)
      {
         // FIX-ME: zero-diagonal should be tested previously
         if (fabs(diaginv[i]) < SMALLREAL)
         {
            diaginv[i] = 0.0;
         }
         else
         {
            diaginv[i] = 1.0 / diaginv[i];
         }
      }
   }

   *diaginvptr = diaginv;

   return 1;
}

/* Block relaxation driver: extracts and inverts the diagonal blocks of A
 * into a local scratch buffer, then delegates one sweep to
 * hypre_blockRelax_solve. Ztemp is accepted but unused here. */
HYPRE_Int
hypre_blockRelax(hypre_ParCSRMatrix *A,
                 hypre_ParVector *f,
                 hypre_ParVector *u,
                 HYPRE_Int blk_size,
                 HYPRE_Int reserved_coarse_size,
                 HYPRE_Int method,
                 hypre_ParVector *Vtemp,
                 hypre_ParVector *Ztemp)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);

   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);

   HYPRE_Int i, j, k;
   HYPRE_Int ii, jj;

   HYPRE_Int bidx, bidxm1, bidxp1;
   HYPRE_Int relax_error = 0;

   HYPRE_Int num_procs, my_id;

   const HYPRE_Int nb2 = blk_size * blk_size;
   HYPRE_Int n_block;
HYPRE_Int left_size, inv_size;
   HYPRE_Real *diaginv;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);
   //HYPRE_Int num_threads = hypre_NumThreads();

   /* FIXME(review): my_id == num_procs is never true (ranks run
      0..num_procs-1), so the reserved_coarse_size branch is dead code;
      presumably my_id == num_procs - 1 was intended — confirm. */
   if (my_id == num_procs)
   {
      n_block = (n - reserved_coarse_size) / blk_size;
      left_size = n - blk_size * n_block;
   }
   else
   {
      n_block = n / blk_size;
      left_size = n - blk_size * n_block;
   }

   inv_size = nb2 * n_block + left_size * left_size;

   diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST);
   /*-----------------------------------------------------------------
    * Get all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   for (i = 0; i < n_block; i++)
   {
      bidxm1 = i * blk_size;
      bidxp1 = (i + 1) * blk_size;
      //printf("bidxm1 = %d,bidxp1 = %d\n",bidxm1,bidxp1);

      for (k = 0; k < blk_size; k++)
      {
         for (j = 0; j < blk_size; j++)
         {
            bidx = i * nb2 + k * blk_size + j;
            diaginv[bidx] = 0.0;
         }

         for (ii = A_diag_i[bidxm1 + k]; ii < A_diag_i[bidxm1 + k + 1]; ii++)
         {
            jj = A_diag_j[ii];

            if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL)
            {
               bidx = i * nb2 + k * blk_size + jj - bidxm1;
               //printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx);
               diaginv[bidx] = A_diag_data[ii];
            }
         }
      }
   }

   /* FIXME(review): same leftover-block indexing concerns as in
      hypre_blockRelax_setup (blk_size stride vs left_size, jj > vs >=,
      and the (HYPRE_Int)(blk_size * nb2) inverse offset below vs the
      n_block * nb2 storage offset used here). */
   for (i = 0; i < left_size; i++)
   {
      bidxm1 = n_block * nb2 + i * blk_size;
      bidxp1 = n_block * nb2 + (i + 1) * blk_size;
      for (j = 0; j < left_size; j++)
      {
         bidx = n_block * nb2 + i * blk_size + j;
         diaginv[bidx] = 0.0;
      }

      for (ii = A_diag_i[n_block * blk_size + i]; ii < A_diag_i[n_block * blk_size + i + 1]; ii++)
      {
         jj = A_diag_j[ii];
         if (jj > n_block * blk_size)
         {
            bidx = n_block * nb2 + i * blk_size + jj - n_block * blk_size;
            diaginv[bidx] = A_diag_data[ii];
         }
      }
   }

   /*
   for (i = 0;i < n_block; i++)
   {
      for (j = 0;j < blk_size; j++)
      {
         for (k = 0;k < blk_size; k ++)
         {
            bidx = i*nb2 + j*blk_size + k;
            printf("%e\t",diaginv[bidx]);
         }
         printf("\n");
      }
      printf("\n");
   }
   */

   /*-----------------------------------------------------------------
    * compute the inverses of all the diagonal sub-blocks
    *-----------------------------------------------------------------*/
   if (blk_size > 1)
   {
      for (i = 0; i < n_block; i++)
      {
         hypre_blas_mat_inv(diaginv + i * nb2, blk_size);
      }
      hypre_blas_mat_inv(diaginv + (HYPRE_Int)(blk_size * nb2), left_size);
      /*
      for (i = 0;i < n_block; i++)
      {
         for (j = 0;j < blk_size; j++)
         {
            for (k = 0;k < blk_size; k ++)
            {
               bidx = i*nb2 + j*blk_size + k;
               printf("%e\t",diaginv[bidx]);
            }
            printf("\n");
         }
         printf("\n");
      }
      */
   }
   else
   {
      /* scalar case: plain reciprocal with a zero guard */
      for (i = 0; i < n; i++)
      {
         // FIX-ME: zero-diagonal should be tested previously
         if (fabs(diaginv[i]) < SMALLREAL)
         {
            diaginv[i] = 0.0;
         }
         else
         {
            diaginv[i] = 1.0 / diaginv[i];
         }
      }
   }

   hypre_blockRelax_solve(A, f, u, blk_size, n_block, left_size, method, diaginv, Vtemp);

   /*-----------------------------------------------------------------
    * Free temperary memeory
    *-----------------------------------------------------------------*/
   hypre_TFree(diaginv, HYPRE_MEMORY_HOST);

   return (relax_error);
}

/* set coarse grid solver */
/* Registers a user-supplied F-relaxation (fine grid) solver and its
 * solve/setup callbacks. Only slot 0 of aff_solver is populated (see the
 * comment below); fsolver_mode = 0 marks a user-provided solver. */
HYPRE_Int hypre_MGRSetFSolver( void *mgr_vdata, HYPRE_Int (*fine_grid_solver_solve)(void*, void*, void*, void*), HYPRE_Int (*fine_grid_solver_setup)(void*, void*, void*, void*), void *fsolver )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;

   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
   HYPRE_Solver **aff_solver = (mgr_data -> aff_solver);

   if (aff_solver == NULL)
   {
      aff_solver = hypre_CTAlloc(HYPRE_Solver*, max_num_coarse_levels, HYPRE_MEMORY_HOST);
   }

   /* only allow to set F-solver for the first level */
   aff_solver[0] = (HYPRE_Solver *) fsolver;

   (mgr_data -> fine_grid_solver_solve) = fine_grid_solver_solve;
   (mgr_data -> fine_grid_solver_setup) = fine_grid_solver_setup;
   (mgr_data -> aff_solver) = aff_solver;
   (mgr_data -> fsolver_mode) = 0;

   return hypre_error_flag;
}

/* set coarse grid solver */
/* Registers the coarse-grid solver object and its solve/setup callbacks;
 * clears use_default_cgrid_solver so the user solver is used. */
HYPRE_Int hypre_MGRSetCoarseSolver( void *mgr_vdata, HYPRE_Int (*coarse_grid_solver_solve)(void*, void*, void*, void*), HYPRE_Int (*coarse_grid_solver_setup)(void*, void*, void*, void*), void *coarse_grid_solver )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;

   if (!mgr_data)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   (mgr_data -> coarse_grid_solver_solve) = coarse_grid_solver_solve;
   (mgr_data -> coarse_grid_solver_setup) = coarse_grid_solver_setup;
   (mgr_data -> coarse_grid_solver) = (HYPRE_Solver) coarse_grid_solver;

   (mgr_data -> use_default_cgrid_solver) = 0;

   return hypre_error_flag;
}

/* Store a user-provided approximate inverse of A_ff.
 * NOTE(review): no NULL check on mgr_vdata here, unlike the two setters
 * above — confirm whether that is intentional. */
HYPRE_Int hypre_MGRSetAffInv( void *mgr_vdata, hypre_ParCSRMatrix *A_ff_inv )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> A_ff_inv) = A_ff_inv;
   return hypre_error_flag;
}

/* Set the maximum number of coarse levels.
 * maxcoarselevs = 1 yields the default 2-grid scheme. */
HYPRE_Int
hypre_MGRSetMaxCoarseLevels( void *mgr_vdata, HYPRE_Int maxcoarselevs )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> max_num_coarse_levels) = maxcoarselevs;
   return hypre_error_flag;
}

/* Set the system block size */
HYPRE_Int
hypre_MGRSetBlockSize( void *mgr_vdata, HYPRE_Int bsize )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> block_size) = bsize;
   return hypre_error_flag;
}

/* Set the relaxation type for the fine levels of the reduction.
 * Currently supports the following flavors of relaxation types
 * as described in the documentation:
 * relax_types 0 - 8, 13, 14, 18, 19, 98.
 * See par_relax.c and par_relax_more.c for more details.
 * */
HYPRE_Int
hypre_MGRSetRelaxType( void *mgr_vdata, HYPRE_Int relax_type )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> relax_type) = relax_type;
   return hypre_error_flag;
}

/* Set the number of relaxation sweeps */
HYPRE_Int
hypre_MGRSetNumRelaxSweeps( void *mgr_vdata, HYPRE_Int nsweeps )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> num_relax_sweeps) = nsweeps;
   return hypre_error_flag;
}

/* Set the F-relaxation strategy: 0=single level, 1=multi level */
/* Uniform variant: applies the same method to every coarse level;
 * frees and reallocates the per-level array. */
HYPRE_Int
hypre_MGRSetFRelaxMethod( void *mgr_vdata, HYPRE_Int relax_method )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_Int i;
   HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
   if ((mgr_data -> Frelax_method) != NULL)
   {
      hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST);
      (mgr_data -> Frelax_method) = NULL;
   }
   HYPRE_Int *Frelax_method = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST);

   for (i = 0; i < max_num_coarse_levels; i++)
   {
      Frelax_method[i] = relax_method;
   }
   (mgr_data -> Frelax_method) = Frelax_method;
   return hypre_error_flag;
}

/* Set the F-relaxation strategy: 0=single level, 1=multi level */
/* Per-level variant; a NULL relax_method resets every level to 0. */
HYPRE_Int
hypre_MGRSetLevelFRelaxMethod( void *mgr_vdata, HYPRE_Int *relax_method )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_Int i;
   HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
   if ((mgr_data -> Frelax_method) != NULL)
   {
      hypre_TFree(mgr_data -> Frelax_method, HYPRE_MEMORY_HOST);
      (mgr_data -> Frelax_method) = NULL;
   }
   HYPRE_Int *Frelax_method = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST);
   if (relax_method != NULL)
   {
      for (i = 0; i < max_num_coarse_levels; i++)
      {
         Frelax_method[i] = relax_method[i];
      }
   }
   else
   {
      for (i = 0; i < max_num_coarse_levels; i++)
      {
         Frelax_method[i] = 0;
      }
   }
   (mgr_data -> Frelax_method) = Frelax_method;
   return hypre_error_flag;
}

/* Coarse grid method: 0=Galerkin RAP, 1=non-Galerkin with dropping*/
/* Select the coarse-grid construction per level: 0 = Galerkin RAP,
 * 1 = non-Galerkin with dropping. NULL cg_method selects 0 everywhere. */
HYPRE_Int
hypre_MGRSetCoarseGridMethod( void *mgr_vdata, HYPRE_Int *cg_method )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_Int i;
   HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
   /* Replace any previously set per-level array. */
   if ((mgr_data -> use_non_galerkin_cg) != NULL)
   {
      hypre_TFree(mgr_data -> use_non_galerkin_cg, HYPRE_MEMORY_HOST);
      (mgr_data -> use_non_galerkin_cg) = NULL;
   }
   HYPRE_Int *use_non_galerkin_cg = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST);
   if (cg_method != NULL)
   {
      for (i = 0; i < max_num_coarse_levels; i++)
      {
         use_non_galerkin_cg[i] = cg_method[i];
      }
   }
   else
   {
      for (i = 0; i < max_num_coarse_levels; i++)
      {
         use_non_galerkin_cg[i] = 0;
      }
   }
   (mgr_data -> use_non_galerkin_cg) = use_non_galerkin_cg;
   return hypre_error_flag;
}

/* Set the F-relaxation number of functions for each level.
 * NULL num_functions selects the default (1) on every level. */
HYPRE_Int
hypre_MGRSetLevelFRelaxNumFunctions( void *mgr_vdata, HYPRE_Int *num_functions )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_Int i;
   HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
   if ((mgr_data -> Frelax_num_functions) != NULL)
   {
      hypre_TFree(mgr_data -> Frelax_num_functions, HYPRE_MEMORY_HOST);
      (mgr_data -> Frelax_num_functions) = NULL;
   }
   HYPRE_Int *Frelax_num_functions = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST);
   if (num_functions != NULL)
   {
      for (i = 0; i < max_num_coarse_levels; i++)
      {
         Frelax_num_functions[i] = num_functions[i];
      }
   }
   else
   {
      /* Default: one function per F-relaxation. */
      for (i = 0; i < max_num_coarse_levels; i++)
      {
         Frelax_num_functions[i] = 1;
      }
   }
   (mgr_data -> Frelax_num_functions) = Frelax_num_functions;
   return hypre_error_flag;
}

/* Set the type of the restriction operator per level.
 * NULL restrict_type selects the default (0) on every level. */
HYPRE_Int
hypre_MGRSetLevelRestrictType( void *mgr_vdata, HYPRE_Int *restrict_type)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_Int i;
   HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
   if ((mgr_data -> restrict_type) != NULL)
   {
      hypre_TFree((mgr_data -> restrict_type), HYPRE_MEMORY_HOST);
      (mgr_data -> restrict_type) = NULL;
   }
   HYPRE_Int *level_restrict_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST);
   if (restrict_type != NULL)
   {
      for (i = 0; i < max_num_coarse_levels; i++)
      {
         level_restrict_type[i] = *(restrict_type + i);
      }
   }
   else
   {
      for (i = 0; i < max_num_coarse_levels; i++)
      {
         level_restrict_type[i] = 0;
      }
   }
   (mgr_data -> restrict_type) = level_restrict_type;
   return hypre_error_flag;
}

/* Set the type of the restriction operator (same value on every level). */
HYPRE_Int
hypre_MGRSetRestrictType( void *mgr_vdata, HYPRE_Int restrict_type)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_Int i;
   HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
   if ((mgr_data -> restrict_type) != NULL)
   {
      hypre_TFree((mgr_data -> restrict_type), HYPRE_MEMORY_HOST);
      (mgr_data -> restrict_type) = NULL;
   }
   HYPRE_Int *level_restrict_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST);
   for (i = 0; i < max_num_coarse_levels; i++)
   {
      level_restrict_type[i] = restrict_type;
   }
   (mgr_data -> restrict_type) = level_restrict_type;
   return hypre_error_flag;
}

/* Set the number of Jacobi sweeps used when computing the
 * restriction operator. */
HYPRE_Int
hypre_MGRSetNumRestrictSweeps( void *mgr_vdata, HYPRE_Int nsweeps )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> num_restrict_sweeps) = nsweeps;
   return hypre_error_flag;
}

/* Set the type of the interpolation operator (same value on every level). */
HYPRE_Int
hypre_MGRSetInterpType( void *mgr_vdata, HYPRE_Int interpType)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_Int i;
   HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
   if ((mgr_data -> interp_type) != NULL)
   {
      hypre_TFree((mgr_data -> interp_type), HYPRE_MEMORY_HOST);
      (mgr_data -> interp_type) = NULL;
   }
   HYPRE_Int *level_interp_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST);
   for (i = 0; i < max_num_coarse_levels; i++)
   {
      level_interp_type[i] = interpType;
   }
   (mgr_data -> interp_type) = level_interp_type;
   return hypre_error_flag;
}

/* Set the type of the interpolation operator per level.
 * NULL interpType selects the default (2) on every level. */
HYPRE_Int
hypre_MGRSetLevelInterpType( void *mgr_vdata, HYPRE_Int *interpType)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_Int i;
   HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels);
   if ((mgr_data -> interp_type) != NULL)
   {
      hypre_TFree((mgr_data -> interp_type), HYPRE_MEMORY_HOST);
      (mgr_data -> interp_type) = NULL;
   }
   HYPRE_Int *level_interp_type = hypre_CTAlloc(HYPRE_Int, max_num_coarse_levels, HYPRE_MEMORY_HOST);
   if (interpType != NULL)
   {
      for (i = 0; i < max_num_coarse_levels; i++)
      {
         level_interp_type[i] = *(interpType + i);
      }
   }
   else
   {
      for (i = 0; i < max_num_coarse_levels; i++)
      {
         level_interp_type[i] = 2;
      }
   }
   (mgr_data -> interp_type) = level_interp_type;
   return hypre_error_flag;
}

/* Set the number of Jacobi sweeps used when computing the
 * interpolation operator. */
HYPRE_Int
hypre_MGRSetNumInterpSweeps( void *mgr_vdata, HYPRE_Int nsweeps )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> num_interp_sweeps) = nsweeps;
   return hypre_error_flag;
}

/* Set the threshold to truncate the coarse grid at each
 * level of reduction */
HYPRE_Int
hypre_MGRSetTruncateCoarseGridThreshold( void *mgr_vdata, HYPRE_Real threshold)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> truncate_coarse_grid_threshold) = threshold;
   return hypre_error_flag;
}

/* Set print level for F-relaxation solver */
HYPRE_Int
hypre_MGRSetFrelaxPrintLevel( void *mgr_vdata, HYPRE_Int print_level )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> frelax_print_level) = print_level;
   return hypre_error_flag;
}

/* Set print level for coarse grid solver */
HYPRE_Int
hypre_MGRSetCoarseGridPrintLevel( void *mgr_vdata, HYPRE_Int print_level )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> cg_print_level) = print_level;
   return hypre_error_flag;
}

/* Set print level for mgr solver */
HYPRE_Int
hypre_MGRSetPrintLevel( void *mgr_vdata, HYPRE_Int print_level )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> print_level) = print_level;
   return hypre_error_flag;
}

/* Set logging level for mgr solver */
HYPRE_Int
hypre_MGRSetLogging( void *mgr_vdata, HYPRE_Int logging )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> logging) = logging;
   return hypre_error_flag;
}

/* Set max number of iterations for mgr solver */
HYPRE_Int
hypre_MGRSetMaxIter( void *mgr_vdata, HYPRE_Int max_iter )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> max_iter) = max_iter;
   return hypre_error_flag;
}

/* Set convergence tolerance for mgr solver */
HYPRE_Int
hypre_MGRSetTol( void *mgr_vdata, HYPRE_Real tol )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> tol) = tol;
   return hypre_error_flag;
}

/* Set max number of iterations for mgr global smoother */
HYPRE_Int
hypre_MGRSetMaxGlobalsmoothIters( void *mgr_vdata, HYPRE_Int max_iter )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> global_smooth_iters) = max_iter;
   return hypre_error_flag;
}

/* Set global smoothing type for mgr solver */
HYPRE_Int
hypre_MGRSetGlobalsmoothType( void *mgr_vdata, HYPRE_Int iter_type )
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> global_smooth_type) = iter_type;
   return hypre_error_flag;
}

/* Set the maximum number of non-zero entries for restriction
   and interpolation operator if classical AMG interpolation is used */
HYPRE_Int
hypre_MGRSetPMaxElmts( void *mgr_vdata, HYPRE_Int P_max_elmts)
{
   hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> P_max_elmts) = P_max_elmts;
   return hypre_error_flag;
}

/* Get number of iterations for MGR solver */
HYPRE_Int hypre_MGRGetNumIterations( void *mgr_vdata, HYPRE_Int *num_iterations ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } *num_iterations = mgr_data->num_iterations; return hypre_error_flag; } /* Get residual norms for MGR solver */ HYPRE_Int hypre_MGRGetFinalRelativeResidualNorm( void *mgr_vdata, HYPRE_Real *res_norm ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } *res_norm = mgr_data->final_rel_residual_norm; return hypre_error_flag; } HYPRE_Int hypre_MGRGetCoarseGridConvergenceFactor( void *mgr_vdata, HYPRE_Real *conv_factor ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } *conv_factor = (mgr_data -> cg_convergence_factor); return hypre_error_flag; } /* Build A_FF matrix from A given a CF_marker array */ HYPRE_Int hypre_MGRGetSubBlock( hypre_ParCSRMatrix *A, HYPRE_Int *row_cf_marker, HYPRE_Int *col_cf_marker, HYPRE_Int debug_flag, hypre_ParCSRMatrix **A_block_ptr ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); //HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A); hypre_IntArray *coarse_dof_func_ptr = NULL; HYPRE_BigInt num_row_cpts_global[2]; HYPRE_BigInt 
num_col_cpts_global[2]; hypre_ParCSRMatrix *Ablock; HYPRE_BigInt *col_map_offd_Ablock; HYPRE_Int *tmp_map_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; hypre_CSRMatrix *Ablock_diag; hypre_CSRMatrix *Ablock_offd; HYPRE_Real *Ablock_diag_data; HYPRE_Int *Ablock_diag_i; HYPRE_Int *Ablock_diag_j; HYPRE_Real *Ablock_offd_data; HYPRE_Int *Ablock_offd_i; HYPRE_Int *Ablock_offd_j; HYPRE_Int Ablock_diag_size, Ablock_offd_size; HYPRE_Int *Ablock_marker; HYPRE_Int ii_counter; HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; HYPRE_Int start_indexing = 0; /* start indexing for Aff_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int *fine_to_coarse; HYPRE_Int *coarse_counter; HYPRE_Int *col_coarse_counter; HYPRE_Int coarse_shift; HYPRE_BigInt total_global_row_cpts; HYPRE_BigInt total_global_col_cpts; HYPRE_Int num_cols_Ablock_offd; // HYPRE_BigInt my_first_row_cpt, my_first_col_cpt; HYPRE_Int i, i1; HYPRE_Int j, jl, jj; HYPRE_Int start; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); hypre_IntArray *wrap_cf; // HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); //num_threads = hypre_NumThreads(); // Temporary fix, disable threading // TODO: enable threading num_threads = 1; /* get the number of coarse rows */ wrap_cf = hypre_IntArrayCreate(local_numrows); hypre_IntArrayMemoryLocation(wrap_cf) = HYPRE_MEMORY_HOST; hypre_IntArrayData(wrap_cf) = row_cf_marker; hypre_BoomerAMGCoarseParms(comm, local_numrows, 1, NULL, wrap_cf, &coarse_dof_func_ptr, num_row_cpts_global); hypre_IntArrayDestroy(coarse_dof_func_ptr); coarse_dof_func_ptr = NULL; //hypre_printf("my_id = %d, cpts_this = %d, cpts_next = %d\n", my_id, num_row_cpts_global[0], num_row_cpts_global[1]); // my_first_row_cpt = 
num_row_cpts_global[0]; if (my_id == (num_procs - 1)) { total_global_row_cpts = num_row_cpts_global[1]; } hypre_MPI_Bcast(&total_global_row_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm); /* get the number of coarse rows */ hypre_IntArrayData(wrap_cf) = col_cf_marker; hypre_BoomerAMGCoarseParms(comm, local_numrows, 1, NULL, wrap_cf, &coarse_dof_func_ptr, num_col_cpts_global); hypre_IntArrayDestroy(coarse_dof_func_ptr); coarse_dof_func_ptr = NULL; //hypre_printf("my_id = %d, cpts_this = %d, cpts_next = %d\n", my_id, num_col_cpts_global[0], num_col_cpts_global[1]); // my_first_col_cpt = num_col_cpts_global[0]; if (my_id == (num_procs - 1)) { total_global_col_cpts = num_col_cpts_global[1]; } hypre_MPI_Bcast(&total_global_col_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm); /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag < 0) { debug_flag = -debug_flag; } // if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_cols_A_offd) { CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); } if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) int_buf_data[index++] = col_cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); /*----------------------------------------------------------------------- * First Pass: Determine size of Ablock and fill in fine_to_coarse mapping. 
*-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); col_coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) { fine_to_coarse[i] = -1; } jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { size = n_fine / num_threads; rest = n_fine - size * num_threads; if (j < rest) { ns = j * size + j; ne = (j + 1) * size + j + 1; } else { ns = j * size + rest; ne = (j + 1) * size + rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a F-point, we loop through the columns and select * the F-columns. Also set up mapping vector. 
*--------------------------------------------------------------------*/ if (col_cf_marker[i] > 0) { fine_to_coarse[i] = col_coarse_counter[j]; col_coarse_counter[j]++; } if (row_cf_marker[i] > 0) { //fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++) { i1 = A_diag_j[jj]; if (col_cf_marker[i1] > 0) { jj_count[j]++; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++) { i1 = A_offd_j[jj]; if (CF_marker_offd[i1] > 0) { jj_count_offd[j]++; } } } } } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ for (i = 0; i < num_threads - 1; i++) { jj_count[i + 1] += jj_count[i]; jj_count_offd[i + 1] += jj_count_offd[i]; coarse_counter[i + 1] += coarse_counter[i]; col_coarse_counter[i + 1] += col_coarse_counter[i]; } i = num_threads - 1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; ii_counter = coarse_counter[i]; Ablock_diag_size = jj_counter; Ablock_diag_i = hypre_CTAlloc(HYPRE_Int, ii_counter + 1, memory_location); Ablock_diag_j = hypre_CTAlloc(HYPRE_Int, Ablock_diag_size, memory_location); Ablock_diag_data = hypre_CTAlloc(HYPRE_Real, Ablock_diag_size, memory_location); Ablock_diag_i[ii_counter] = jj_counter; Ablock_offd_size = jj_counter_offd; Ablock_offd_i = hypre_CTAlloc(HYPRE_Int, ii_counter + 1, memory_location); Ablock_offd_j = hypre_CTAlloc(HYPRE_Int, Ablock_offd_size, memory_location); Ablock_offd_data = hypre_CTAlloc(HYPRE_Real, Ablock_offd_size, memory_location); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; //----------------------------------------------------------------------- // Send and receive fine_to_coarse info. 
//----------------------------------------------------------------------- // if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) { coarse_shift = col_coarse_counter[j - 1]; } size = n_fine / num_threads; rest = n_fine - size * num_threads; if (j < rest) { ns = j * size + j; ne = (j + 1) * size + j + 1; } else { ns = j * size + rest; ne = (j + 1) * size + rest; } for (i = ns; i < ne; i++) { fine_to_coarse[i] += coarse_shift; } } // if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif // for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_col_cpt; #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,jl,i1,jj,ns,ne,size,rest,jj_counter,jj_counter_offd,ii_counter) HYPRE_SMP_SCHEDULE #endif #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine / num_threads; rest = n_fine - size * num_threads; if (jl < rest) { ns = jl * size + jl; ne = (jl + 1) * size + jl + 1; } else { ns = jl * size + rest; ne = (jl + 1) * size + rest; } jj_counter = 0; if (jl > 0) { jj_counter = jj_count[jl - 1]; } jj_counter_offd = 0; if (jl > 0) { jj_counter_offd = jj_count_offd[jl - 1]; } ii_counter = 0; for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a F-point, we loop through the columns and select * the F-columns. Also set up mapping vector. 
*--------------------------------------------------------------------*/ if (row_cf_marker[i] > 0) { // Diagonal part of Ablock // Ablock_diag_i[ii_counter] = jj_counter; for (jj = A_diag_i[i]; jj < A_diag_i[i + 1]; jj++) { i1 = A_diag_j[jj]; if (col_cf_marker[i1] > 0) { Ablock_diag_j[jj_counter] = fine_to_coarse[i1]; Ablock_diag_data[jj_counter] = A_diag_data[jj]; jj_counter++; } } // Off-Diagonal part of Ablock // Ablock_offd_i[ii_counter] = jj_counter_offd; if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i + 1]; jj++) { i1 = A_offd_j[jj]; if (CF_marker_offd[i1] > 0) { Ablock_offd_j[jj_counter_offd] = i1; Ablock_offd_data[jj_counter_offd] = A_offd_data[jj]; jj_counter_offd++; } } } ii_counter++; } } Ablock_offd_i[ii_counter] = jj_counter_offd; Ablock_diag_i[ii_counter] = jj_counter; } Ablock = hypre_ParCSRMatrixCreate(comm, total_global_row_cpts, total_global_col_cpts, num_row_cpts_global, num_col_cpts_global, 0, Ablock_diag_i[ii_counter], Ablock_offd_i[ii_counter]); Ablock_diag = hypre_ParCSRMatrixDiag(Ablock); hypre_CSRMatrixData(Ablock_diag) = Ablock_diag_data; hypre_CSRMatrixI(Ablock_diag) = Ablock_diag_i; hypre_CSRMatrixJ(Ablock_diag) = Ablock_diag_j; Ablock_offd = hypre_ParCSRMatrixOffd(Ablock); hypre_CSRMatrixData(Ablock_offd) = Ablock_offd_data; hypre_CSRMatrixI(Ablock_offd) = Ablock_offd_i; hypre_CSRMatrixJ(Ablock_offd) = Ablock_offd_j; num_cols_Ablock_offd = 0; if (Ablock_offd_size) { Ablock_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < num_cols_A_offd; i++) { Ablock_marker[i] = 0; } num_cols_Ablock_offd = 0; for (i = 0; i < Ablock_offd_size; i++) { index = Ablock_offd_j[i]; if (!Ablock_marker[index]) { num_cols_Ablock_offd++; Ablock_marker[index] = 1; } } col_map_offd_Ablock = hypre_CTAlloc(HYPRE_BigInt, num_cols_Ablock_offd, memory_location); tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_Ablock_offd, 
HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_cols_Ablock_offd; i++) { while (Ablock_marker[index] == 0) { index++; } tmp_map_offd[i] = index++; } #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < Ablock_offd_size; i++) Ablock_offd_j[i] = hypre_BinarySearch(tmp_map_offd, Ablock_offd_j[i], num_cols_Ablock_offd); hypre_TFree(Ablock_marker, HYPRE_MEMORY_HOST); } if (num_cols_Ablock_offd) { hypre_ParCSRMatrixColMapOffd(Ablock) = col_map_offd_Ablock; hypre_CSRMatrixNumCols(Ablock_offd) = num_cols_Ablock_offd; } hypre_GetCommPkgRTFromCommPkgA(Ablock, A, fine_to_coarse, tmp_map_offd); /* Create the assumed partition */ if (hypre_ParCSRMatrixAssumedPartition(Ablock) == NULL) { hypre_ParCSRMatrixCreateAssumedPartition(Ablock); } *A_block_ptr = Ablock; hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(col_coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); return (0); } /* Build A_FF matrix from A given a CF_marker array */ HYPRE_Int hypre_MGRBuildAff( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, HYPRE_Int debug_flag, hypre_ParCSRMatrix **A_ff_ptr ) { HYPRE_Int i; HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); /* create a copy of the CF_marker array and switch C-points to F-points */ HYPRE_Int *CF_marker_copy = hypre_CTAlloc(HYPRE_Int, local_numrows, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < local_numrows; i++) { CF_marker_copy[i] = -CF_marker[i]; } hypre_MGRGetSubBlock(A, CF_marker_copy, CF_marker_copy, debug_flag, A_ff_ptr); /* Free copy of CF marker */ hypre_TFree(CF_marker_copy, HYPRE_MEMORY_HOST); return (0); } 
/********************************************************************************* * This routine assumes that the 'toVector' is larger than the 'fromVector' and * the CF_marker is of the same length as the toVector. There must be n 'point_type' * values in the CF_marker, where n is the length of the 'fromVector'. * It adds the values of the 'fromVector' to the 'toVector' where the marker is the * same as the 'point_type' *********************************************************************************/ HYPRE_Int hypre_MGRAddVectorP ( hypre_IntArray *CF_marker, HYPRE_Int point_type, HYPRE_Real a, hypre_ParVector *fromVector, HYPRE_Real b, hypre_ParVector **toVector ) { hypre_Vector *fromVectorLocal = hypre_ParVectorLocalVector(fromVector); HYPRE_Real *fromVectorData = hypre_VectorData(fromVectorLocal); hypre_Vector *toVectorLocal = hypre_ParVectorLocalVector(*toVector); HYPRE_Real *toVectorData = hypre_VectorData(toVectorLocal); HYPRE_Int *CF_marker_data = hypre_IntArrayData(CF_marker); //HYPRE_Int n = hypre_ParVectorActualLocalSize(*toVector); HYPRE_Int n = hypre_IntArraySize(CF_marker); HYPRE_Int i, j; j = 0; for (i = 0; i < n; i++) { if (CF_marker_data[i] == point_type) { toVectorData[i] = b * toVectorData[i] + a * fromVectorData[j]; j++; } } return 0; } /************************************************************************************* * This routine assumes that the 'fromVector' is larger than the 'toVector' and * the CF_marker is of the same length as the fromVector. There must be n 'point_type' * values in the CF_marker, where n is the length of the 'toVector'. 
* It adds the values of the 'fromVector' where the marker is the * same as the 'point_type' to the 'toVector' *************************************************************************************/ HYPRE_Int hypre_MGRAddVectorR ( hypre_IntArray *CF_marker, HYPRE_Int point_type, HYPRE_Real a, hypre_ParVector *fromVector, HYPRE_Real b, hypre_ParVector **toVector ) { hypre_Vector *fromVectorLocal = hypre_ParVectorLocalVector(fromVector); HYPRE_Real *fromVectorData = hypre_VectorData(fromVectorLocal); hypre_Vector *toVectorLocal = hypre_ParVectorLocalVector(*toVector); HYPRE_Real *toVectorData = hypre_VectorData(toVectorLocal); HYPRE_Int *CF_marker_data = hypre_IntArrayData(CF_marker); //HYPRE_Int n = hypre_ParVectorActualLocalSize(*toVector); HYPRE_Int n = hypre_IntArraySize(CF_marker); HYPRE_Int i, j; j = 0; for (i = 0; i < n; i++) { if (CF_marker_data[i] == point_type) { toVectorData[j] = b * toVectorData[j] + a * fromVectorData[i]; j++; } } return 0; } /* HYPRE_Int hypre_MGRBuildAffRAP( MPI_Comm comm, HYPRE_Int local_num_variables, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int *CF_marker, HYPRE_Int **coarse_dof_func_ptr, HYPRE_BigInt **coarse_pnts_global_ptr, hypre_ParCSRMatrix *A, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_f_ptr, hypre_ParCSRMatrix **A_ff_ptr ) { HYPRE_Int *CF_marker_copy = hypre_CTAlloc(HYPRE_Int, local_num_variables, HYPRE_MEMORY_HOST); HYPRE_Int i; for (i = 0; i < local_num_variables; i++) { CF_marker_copy[i] = -CF_marker[i]; } hypre_BoomerAMGCoarseParms(comm, local_num_variables, 1, NULL, CF_marker_copy, coarse_dof_func_ptr, coarse_pnts_global_ptr); hypre_MGRBuildP(A, CF_marker_copy, (*coarse_pnts_global_ptr), 0, debug_flag, P_f_ptr); hypre_BoomerAMGBuildCoarseOperator(*P_f_ptr, A, *P_f_ptr, A_ff_ptr); hypre_TFree(CF_marker_copy, HYPRE_MEMORY_HOST); return 0; } */ /* Get pointer to coarse grid matrix for MGR solver */ HYPRE_Int hypre_MGRGetCoarseGridMatrix( void *mgr_vdata, hypre_ParCSRMatrix **RAP ) { hypre_ParMGRData *mgr_data = 
(hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } if (mgr_data -> RAP == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, " Coarse grid matrix is NULL. Please make sure MGRSetup() is called \n"); return hypre_error_flag; } *RAP = mgr_data->RAP; return hypre_error_flag; } /* Get pointer to coarse grid solution for MGR solver */ HYPRE_Int hypre_MGRGetCoarseGridSolution( void *mgr_vdata, hypre_ParVector **sol ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } if (mgr_data -> U_array == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, " MGR solution array is NULL. Please make sure MGRSetup() and MGRSolve() are called \n"); return hypre_error_flag; } *sol = mgr_data->U_array[mgr_data->num_coarse_levels]; return hypre_error_flag; } /* Get pointer to coarse grid solution for MGR solver */ HYPRE_Int hypre_MGRGetCoarseGridRHS( void *mgr_vdata, hypre_ParVector **rhs ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } if (mgr_data -> F_array == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC, " MGR RHS array is NULL. 
Please make sure MGRSetup() and MGRSolve() are called \n"); return hypre_error_flag; } *rhs = mgr_data->F_array[mgr_data->num_coarse_levels]; return hypre_error_flag; } /* Print coarse grid linear system (for debugging)*/ HYPRE_Int hypre_MGRPrintCoarseSystem( void *mgr_vdata, HYPRE_Int print_flag) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; mgr_data->print_coarse_system = print_flag; return hypre_error_flag; } /* Print solver params */ HYPRE_Int hypre_MGRWriteSolverParams(void *mgr_vdata) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int i, j; HYPRE_Int max_num_coarse_levels = (mgr_data -> max_num_coarse_levels); hypre_printf("MGR Setup parameters: \n"); hypre_printf("Block size: %d\n", (mgr_data -> block_size)); hypre_printf("Max number of coarse levels: %d\n", (mgr_data -> max_num_coarse_levels)); hypre_printf("Relax type: %d\n", (mgr_data -> relax_type)); hypre_printf("Set non-Cpoints to F-points: %d\n", (mgr_data -> set_non_Cpoints_to_F)); hypre_printf("Set Cpoints method: %d\n", (mgr_data -> set_c_points_method)); for (i = 0; i < max_num_coarse_levels; i++) { hypre_printf("Lev = %d, Interpolation type: %d\n", i, (mgr_data -> interp_type)[i]); hypre_printf("Lev = %d, Restriction type: %d\n", i, (mgr_data -> restrict_type)[i]); hypre_printf("Lev = %d, F-relaxation method: %d\n", i, (mgr_data -> Frelax_method)[i]); hypre_printf("Lev = %d, Use non-Galerkin coarse grid: %d\n", i, (mgr_data -> use_non_galerkin_cg)[i]); HYPRE_Int lvl_num_coarse_points = (mgr_data -> block_num_coarse_indexes)[i]; hypre_printf("Lev = %d, Number of Cpoints: %d\n", i, lvl_num_coarse_points); hypre_printf("Cpoints indices: "); for (j = 0; j < lvl_num_coarse_points; j++) { if ((mgr_data -> block_cf_marker)[i][j] == 1) { hypre_printf("%d ", j); } } hypre_printf("\n"); } hypre_printf("Number of Reserved Cpoints: %d\n", (mgr_data -> reserved_coarse_size)); hypre_printf("Keep reserved Cpoints to level: %d\n", (mgr_data -> lvl_to_keep_cpoints)); 
hypre_printf("\n MGR Solver Parameters: \n"); hypre_printf("Number of relax sweeps: %d\n", (mgr_data -> num_relax_sweeps)); hypre_printf("Number of interpolation sweeps: %d\n", (mgr_data -> num_interp_sweeps)); hypre_printf("Number of restriction sweeps: %d\n", (mgr_data -> num_restrict_sweeps)); hypre_printf("Global smoother type: %d\n", (mgr_data ->global_smooth_type)); hypre_printf("Number of global smoother sweeps: %d\n", (mgr_data ->global_smooth_iters)); hypre_printf("Max number of iterations: %d\n", (mgr_data -> max_iter)); hypre_printf("Stopping tolerance: %e\n", (mgr_data -> tol)); hypre_printf("Use default coarse grid solver: %d\n", (mgr_data -> use_default_cgrid_solver)); if ((mgr_data -> fsolver_mode) >= 0) { hypre_printf("Use AMG solver for full AMG F-relaxation: %d\n", (mgr_data -> fsolver_mode)); } return hypre_error_flag; } #ifdef HYPRE_USING_DSUPERLU void * hypre_MGRDirectSolverCreate() { hypre_DSLUData *dslu_data = hypre_CTAlloc(hypre_DSLUData, 1, HYPRE_MEMORY_HOST); return (void *) dslu_data; } HYPRE_Int hypre_MGRDirectSolverSetup( void *solver, hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u ) { /* Par Data Structure variables */ HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_local; HYPRE_Int num_rows; HYPRE_Int num_procs, my_id; HYPRE_Int pcols = 1, prows = 1; HYPRE_BigInt *big_rowptr = NULL; hypre_DSLUData *dslu_data = (hypre_DSLUData *) solver; HYPRE_Int info = 0; HYPRE_Int nrhs = 0; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); /* Merge diag and offd into one matrix (global ids) */ A_local = hypre_MergeDiagAndOffd(A); num_rows = hypre_CSRMatrixNumRows(A_local); /* Now convert hypre matrix to a SuperMatrix */ #ifdef HYPRE_MIXEDINT { HYPRE_Int *rowptr = NULL; HYPRE_Int i; rowptr = hypre_CSRMatrixI(A_local); big_rowptr = hypre_CTAlloc(HYPRE_BigInt, (num_rows + 1), HYPRE_MEMORY_HOST); for (i = 0; i < (num_rows + 1); i++) { 
big_rowptr[i] = (HYPRE_BigInt)rowptr[i]; } } #else big_rowptr = hypre_CSRMatrixI(A_local); #endif dCreate_CompRowLoc_Matrix_dist( &(dslu_data->A_dslu), global_num_rows, global_num_rows, hypre_CSRMatrixNumNonzeros(A_local), num_rows, hypre_ParCSRMatrixFirstRowIndex(A), hypre_CSRMatrixData(A_local), hypre_CSRMatrixBigJ(A_local), big_rowptr, SLU_NR_loc, SLU_D, SLU_GE); /* DOK: SuperLU frees assigned data, so set them to null before * calling hypre_CSRMatrixdestroy on A_local to avoid memory errors. */ #ifndef HYPRE_MIXEDINT hypre_CSRMatrixI(A_local) = NULL; #endif hypre_CSRMatrixData(A_local) = NULL; hypre_CSRMatrixBigJ(A_local) = NULL; hypre_CSRMatrixDestroy(A_local); /*Create process grid */ while (prows * pcols <= num_procs) { ++prows; } --prows; pcols = num_procs / prows; while (prows * pcols != num_procs) { prows -= 1; pcols = num_procs / prows; } //hypre_printf(" prows %d pcols %d\n", prows, pcols); superlu_gridinit(comm, prows, pcols, &(dslu_data->dslu_data_grid)); set_default_options_dist(&(dslu_data->dslu_options)); dslu_data->dslu_options.Fact = DOFACT; dslu_data->dslu_options.PrintStat = NO; /*dslu_data->dslu_options.IterRefine = SLU_DOUBLE; dslu_data->dslu_options.ColPerm = MMD_AT_PLUS_A; dslu_data->dslu_options.DiagPivotThresh = 1.0; dslu_data->dslu_options.ReplaceTinyPivot = NO; */ dScalePermstructInit(global_num_rows, global_num_rows, &(dslu_data->dslu_ScalePermstruct)); dLUstructInit(global_num_rows, &(dslu_data->dslu_data_LU)); PStatInit(&(dslu_data->dslu_data_stat)); dslu_data->global_num_rows = global_num_rows; dslu_data->berr = hypre_CTAlloc(HYPRE_Real, 1, HYPRE_MEMORY_HOST); dslu_data->berr[0] = 0.0; pdgssvx(&(dslu_data->dslu_options), &(dslu_data->A_dslu), &(dslu_data->dslu_ScalePermstruct), NULL, num_rows, nrhs, &(dslu_data->dslu_data_grid), &(dslu_data->dslu_data_LU), &(dslu_data->dslu_solve), dslu_data->berr, &(dslu_data->dslu_data_stat), &info); dslu_data->dslu_options.Fact = FACTORED; return hypre_error_flag; } HYPRE_Int 
hypre_MGRDirectSolverSolve( void *solver, hypre_ParCSRMatrix *A, hypre_ParVector *f,
                            hypre_ParVector *u )
{
   /* Thin wrapper: the distributed LU factorization was computed and cached
    * inside `solver` by hypre_MGRDirectSolverSetup() (options.Fact was set to
    * FACTORED there), so the matrix argument A is not referenced here; only
    * the right-hand side f and the solution vector u are forwarded. */
   hypre_SLUDistSolve(solver, f, u);

   return hypre_error_flag;
}

HYPRE_Int
hypre_MGRDirectSolverDestroy( void *solver )
{
   /* Release all SuperLU_DIST state (factors, process grid, statistics,
    * scale/perm structures) allocated during setup. */
   hypre_SLUDistDestroy(solver);

   return hypre_error_flag;
}
#endif
3dMath.h
/* Public Domain / CC0 C99 Vector Math Library */ #ifndef CHAD_MATH_H #define CHAD_MATH_H /* Default behavior- compatibility. */ #ifndef CHAD_MATH_NO_ALIGN #define CHAD_MATH_NO_ALIGN #endif #ifdef __TINYC__ #define CHAD_MATH_NO_ALIGN #endif #ifndef CHAD_MATH_NO_ALIGN #include <stdalign.h> #define CHAD_ALIGN alignas(16) #warning "Chad math library compiling with alignas of 16, malloc and realloc MUST return 16-byte-aligned pointers." #else #define CHAD_ALIGN /*a comment*/ #endif #include <math.h> #include <string.h> typedef float f_; typedef unsigned int uint; #define MAX(x,y) (x>y?x:y) #define MIN(x,y) (x<y?x:y) typedef struct {CHAD_ALIGN f_ d[3];} vec3; typedef struct {CHAD_ALIGN int d[3];} ivec3; typedef struct {CHAD_ALIGN f_ d[4];} vec4; typedef struct {CHAD_ALIGN f_ d[16];} mat4; /*Collision detection These Algorithms return the penetration vector into the shape in the first argument With depth of penetration in element 4 if depth of penetration is zero or lower then there is no penetration. */ typedef struct{ vec4 c; vec3 e; }aabb; typedef aabb colshape; /*c.d[3] determines if it's a sphere or box. 
0 or less = box, greater than 0 = sphere*/ static inline mat4 scalemat4( vec4 s){ mat4 ret; for(int i = 1; i < 16; i++) ret.d[i]= 0.0; ret.d[0*4 + 0] = s.d[0]; ret.d[1*4 + 1] = s.d[1]; ret.d[2*4 + 2] = s.d[2]; ret.d[3*4 + 3] = s.d[3]; return ret; } static inline int invmat4( mat4 m, mat4* invOut) /*returns 1 if successful*/ { mat4 inv; f_ det; int i; inv.d[0] = m.d[5] * m.d[10] * m.d[15] - m.d[5] * m.d[11] * m.d[14] - m.d[9] * m.d[6] * m.d[15] + m.d[9] * m.d[7] * m.d[14] + m.d[13] * m.d[6] * m.d[11] - m.d[13] * m.d[7] * m.d[10]; inv.d[4] = -m.d[4] * m.d[10] * m.d[15] + m.d[4] * m.d[11] * m.d[14] + m.d[8] * m.d[6] * m.d[15] - m.d[8] * m.d[7] * m.d[14] - m.d[12] * m.d[6] * m.d[11] + m.d[12] * m.d[7] * m.d[10]; inv.d[8] = m.d[4] * m.d[9] * m.d[15] - m.d[4] * m.d[11] * m.d[13] - m.d[8] * m.d[5] * m.d[15] + m.d[8] * m.d[7] * m.d[13] + m.d[12] * m.d[5] * m.d[11] - m.d[12] * m.d[7] * m.d[9]; inv.d[12] = -m.d[4] * m.d[9] * m.d[14] + m.d[4] * m.d[10] * m.d[13] + m.d[8] * m.d[5] * m.d[14] - m.d[8] * m.d[6] * m.d[13] - m.d[12] * m.d[5] * m.d[10] + m.d[12] * m.d[6] * m.d[9]; inv.d[1] = -m.d[1] * m.d[10] * m.d[15] + m.d[1] * m.d[11] * m.d[14] + m.d[9] * m.d[2] * m.d[15] - m.d[9] * m.d[3] * m.d[14] - m.d[13] * m.d[2] * m.d[11] + m.d[13] * m.d[3] * m.d[10]; inv.d[5] = m.d[0] * m.d[10] * m.d[15] - m.d[0] * m.d[11] * m.d[14] - m.d[8] * m.d[2] * m.d[15] + m.d[8] * m.d[3] * m.d[14] + m.d[12] * m.d[2] * m.d[11] - m.d[12] * m.d[3] * m.d[10]; inv.d[9] = -m.d[0] * m.d[9] * m.d[15] + m.d[0] * m.d[11] * m.d[13] + m.d[8] * m.d[1] * m.d[15] - m.d[8] * m.d[3] * m.d[13] - m.d[12] * m.d[1] * m.d[11] + m.d[12] * m.d[3] * m.d[9]; inv.d[13] = m.d[0] * m.d[9] * m.d[14] - m.d[0] * m.d[10] * m.d[13] - m.d[8] * m.d[1] * m.d[14] + m.d[8] * m.d[2] * m.d[13] + m.d[12] * m.d[1] * m.d[10] - m.d[12] * m.d[2] * m.d[9]; inv.d[2] = m.d[1] * m.d[6] * m.d[15] - m.d[1] * m.d[7] * m.d[14] - m.d[5] * m.d[2] * m.d[15] + m.d[5] * m.d[3] * m.d[14] + m.d[13] * m.d[2] * m.d[7] - m.d[13] * m.d[3] * m.d[6]; inv.d[6] = 
-m.d[0] * m.d[6] * m.d[15] + m.d[0] * m.d[7] * m.d[14] + m.d[4] * m.d[2] * m.d[15] - m.d[4] * m.d[3] * m.d[14] - m.d[12] * m.d[2] * m.d[7] + m.d[12] * m.d[3] * m.d[6]; inv.d[10] = m.d[0] * m.d[5] * m.d[15] - m.d[0] * m.d[7] * m.d[13] - m.d[4] * m.d[1] * m.d[15] + m.d[4] * m.d[3] * m.d[13] + m.d[12] * m.d[1] * m.d[7] - m.d[12] * m.d[3] * m.d[5]; inv.d[14] = -m.d[0] * m.d[5] * m.d[14] + m.d[0] * m.d[6] * m.d[13] + m.d[4] * m.d[1] * m.d[14] - m.d[4] * m.d[2] * m.d[13] - m.d[12] * m.d[1] * m.d[6] + m.d[12] * m.d[2] * m.d[5]; inv.d[3] = -m.d[1] * m.d[6] * m.d[11] + m.d[1] * m.d[7] * m.d[10] + m.d[5] * m.d[2] * m.d[11] - m.d[5] * m.d[3] * m.d[10] - m.d[9] * m.d[2] * m.d[7] + m.d[9] * m.d[3] * m.d[6]; inv.d[7] = m.d[0] * m.d[6] * m.d[11] - m.d[0] * m.d[7] * m.d[10] - m.d[4] * m.d[2] * m.d[11] + m.d[4] * m.d[3] * m.d[10] + m.d[8] * m.d[2] * m.d[7] - m.d[8] * m.d[3] * m.d[6]; inv.d[11] = -m.d[0] * m.d[5] * m.d[11] + m.d[0] * m.d[7] * m.d[9] + m.d[4] * m.d[1] * m.d[11] - m.d[4] * m.d[3] * m.d[9] - m.d[8] * m.d[1] * m.d[7] + m.d[8] * m.d[3] * m.d[5]; inv.d[15] = m.d[0] * m.d[5] * m.d[10] - m.d[0] * m.d[6] * m.d[9] - m.d[4] * m.d[1] * m.d[10] + m.d[4] * m.d[2] * m.d[9] + m.d[8] * m.d[1] * m.d[6] - m.d[8] * m.d[2] * m.d[5]; det = m.d[0] * inv.d[0] + m.d[1] * inv.d[4] + m.d[2] * inv.d[8] + m.d[3] * inv.d[12]; if (det == 0) return 0; det = 1.0 / det; for (i = 0; i < 16; i++) invOut->d[i] = inv.d[i] * det; return 1; } static inline mat4 perspective( f_ fov, f_ aspect, f_ near, f_ far){ mat4 ret; f_ D2R = 3.14159265358979323 / 180.0; f_ yScale = 1.0/tanf(D2R * fov/2); f_ xScale = yScale/aspect; f_ nearmfar = near-far; ret.d[0*4+0] = xScale; ret.d[0*4+1]=0; ret.d[0*4+2]=0; ret.d[0*4+3]=0; ret.d[1*4+0]=0; ret.d[1*4+1]=yScale;ret.d[1*4+2]=0; ret.d[1*4+3]=0; ret.d[2*4+0]=0; ret.d[2*4+1]=0; ret.d[2*4+2]=(far+near)/nearmfar;ret.d[2*4+3]=-1; ret.d[3*4+0]=0; ret.d[3*4+1]=0; ret.d[3*4+2]=2*far*near/nearmfar;ret.d[3*4+3]=0; /* ret.d[0*4+0] = xScale; ret.d[0*4+1]=0; ret.d[0*4+2]=0; 
   ret.d[0*4+3]=0;
   ret.d[1*4+0]=0; ret.d[1*4+1]=yScale;ret.d[1*4+2]=0;
   ret.d[1*4+3]=0;
   ret.d[2*4+0]=0; ret.d[2*4+1]=0; ret.d[2*4+2]=(far+near)/nearmfar;
   ret.d[2*4+3]=2*far*near/nearmfar;
   ret.d[3*4+0]=0; ret.d[3*4+1]=0; ret.d[3*4+2]=-1; ret.d[3*4+3]=0;
   */
   return ret;
}

/* Map a point from NDC ([-1,1] in x and y) to pixel coordinates for an
   xdim-by-ydim viewport. z is simply halved (NOTE(review): this puts z in
   [-0.5, 0.5] rather than the usual [0,1] — confirm the intended depth range
   with the rasterizer that consumes it). Operates on a copy; the caller's
   vector is not modified. */
static inline vec3 viewport( uint xdim, uint ydim, vec3 input){
   input.d[0] += 1;
   input.d[1] += 1;
   input.d[0] *= (f_)xdim / 2.0;
   input.d[1] *= (f_)ydim / 2.0;
   input.d[2] = (input.d[2])/2.0;
   return input;
}

/* Build a column-major rotation matrix from the three Euler angles in
   `rotation` (radians). NOTE(review): the element layout matches the
   composition Rz(a)*Ry(b)*Rx(c) — confirm the intended axis convention
   before relying on it. */
static inline mat4 rotate( vec3 rotation){
   f_ a = rotation.d[0];
   f_ b = rotation.d[1];
   f_ c = rotation.d[2];
   mat4 rm;
   rm.d[0*4 + 0] = cosf(a)*cosf(b);
   rm.d[1*4 + 0] = sinf(a)*cosf(b);
   rm.d[2*4 + 0] = -sinf(b);
   rm.d[0*4 + 1] = cosf(a)*sinf(b)*sinf(c)-sinf(a)*cosf(c);
   rm.d[1*4 + 1] = sinf(a)*sinf(b)*sinf(c)+cosf(a)*cosf(c);
   rm.d[2*4 + 1] = cosf(b)*sinf(c);
   rm.d[0*4 + 2] = cosf(a)*sinf(b)*cosf(c)+sinf(a)*sinf(c);
   rm.d[1*4 + 2] = sinf(a)*sinf(b)*cosf(c)-cosf(a)*sinf(c);
   rm.d[2*4 + 2] = cosf(b)*cosf(c);
   /* last row/column: no translation, homogeneous 1 in the corner */
   rm.d[0*4 + 3] = 0;
   rm.d[1*4 + 3] = 0;
   rm.d[2*4 + 3] = 0;
   rm.d[3*4 + 3] = 1; /*the bottom right corner of the matrix.*/
   rm.d[3*4 + 0] = 0;
   rm.d[3*4 + 1] = 0;
   rm.d[3*4 + 2] = 0;
   return rm;
}

/* Clamp a scalar to [min, max]. */
static inline f_ clampf( f_ a, f_ min, f_ max){
   if(a<min) return min;
   if(a>max) return max;
   return a;
}

/* Euclidean length of a 3-vector. */
static inline f_ lengthv3( vec3 a){
   return sqrtf(a.d[0] * a.d[0] + a.d[1] * a.d[1] + a.d[2] * a.d[2]);
}

/* Euclidean length of a 4-vector. */
static inline f_ lengthv4( vec4 a){
   return sqrtf(a.d[0] * a.d[0] + a.d[1] * a.d[1] + a.d[2] * a.d[2] + a.d[3] * a.d[3]);
}

/* Component-wise (Hadamard) product of two 3-vectors. */
static inline vec3 multvec3( vec3 a, vec3 b){
   return (vec3){
      .d[0]=a.d[0]*b.d[0],
      .d[1]=a.d[1]*b.d[1],
      .d[2]=a.d[2]*b.d[2]
   };
}

/* Component-wise (Hadamard) product of two 4-vectors. */
static inline vec4 multvec4( vec4 a, vec4 b){
   return (vec4){
      .d[0]=a.d[0]*b.d[0],
      .d[1]=a.d[1]*b.d[1],
      .d[2]=a.d[2]*b.d[2],
      .d[3]=a.d[3]*b.d[3]
   };
}

/* Component-wise clamp of a 3-vector against per-component bounds. */
static inline vec3 clampvec3( vec3 a, vec3 min, vec3 max){
   vec3 ret;
   ret.d[0] = clampf(a.d[0],min.d[0],max.d[0]);
   ret.d[1] = clampf(a.d[1],min.d[1],max.d[1]);
   ret.d[2] = clampf(a.d[2],min.d[2],max.d[2]);
   return ret;
}

/* Component-wise clamp of a 4-vector against per-component bounds. */
static
inline vec4 clampvec4( vec4 a, vec4 min, vec4 max){ vec4 ret; ret.d[0] = clampf(a.d[0],min.d[0],max.d[0]); ret.d[1] = clampf(a.d[1],min.d[1],max.d[1]); ret.d[2] = clampf(a.d[2],min.d[2],max.d[2]); ret.d[3] = clampf(a.d[3],min.d[3],max.d[3]); return ret; } static inline f_ dotv3( vec3 a, vec3 b){ return a.d[0] * b.d[0] + a.d[1] * b.d[1] + a.d[2] * b.d[2]; } static inline f_ dotv4( vec4 a, vec4 b){ return a.d[0] * b.d[0] + a.d[1] * b.d[1] + a.d[2] * b.d[2] + a.d[3] * b.d[3]; } static inline vec4 getrow( mat4 a, uint index){ return (vec4){ .d[0]=a.d[index], .d[1]=a.d[4+index], .d[2]=a.d[8+index], .d[3]=a.d[12+index] }; } static inline mat4 swapRowColumnMajor( mat4 in){ mat4 result; vec4 t; int i = 0; t = getrow(in,i); memcpy(result.d+i*4, t.d, 4*4);i++; t = getrow(in,i); memcpy(result.d+i*4, t.d, 4*4);i++; t = getrow(in,i); memcpy(result.d+i*4, t.d, 4*4);i++; t = getrow(in,i); memcpy(result.d+i*4, t.d, 4*4); return result; } static inline vec4 getcol( mat4 a, uint index){ return (vec4){ .d[0]=a.d[index*4], .d[1]=a.d[index*4+1], .d[2]=a.d[index*4+2], .d[3]=a.d[index*4+3] }; } static inline mat4 multm4( mat4 a, mat4 b){ mat4 ret; #ifdef _OPENMP #pragma omp simd #endif for(int i = 0; i < 4; i++) for(int j = 0; j < 4; j++) ret.d[i*4 + j] = dotv4( /*j is the ROW of the target, i is the COLUMN.*/ getrow(a, j), /*we retrieve the same ROW as our ROW INDEX.*/ getcol(b, i) /*we retrieve the same COLUMN as our COLUMN INDEX.*/ ); return ret; } static inline vec4 mat4xvec4( mat4 t, vec4 v){ vec4 vr; /* Getting a ROW of the matrix and dotting it with the COLUMN VECTOR to get ONE ROW of the output COLUMN VECTOR- one float.*/ vr.d[0] = t.d[0*4] * v.d[0] + t.d[1*4] * v.d[1] + t.d[2*4] * v.d[2] + t.d[3*4] * v.d[3]; vr.d[1] = t.d[0*4+1] * v.d[0] + t.d[1*4+1] * v.d[1] + t.d[2*4+1] * v.d[2] + t.d[3*4+1] * v.d[3]; vr.d[2] = t.d[0*4+2] * v.d[0] + t.d[1*4+2] * v.d[1] + t.d[2*4+2] * v.d[2] + t.d[3*4+2] * v.d[3]; vr.d[3] = t.d[0*4+3] * v.d[0] + t.d[1*4+3] * v.d[1] + t.d[2*4+3] * v.d[2] + 
   t.d[3*4+3] * v.d[3];
   return vr;
}

/* Right-handed cross product a x b. */
static inline vec3 crossv3( vec3 a, vec3 b){
   vec3 retval;
   retval.d[0] = a.d[1] * b.d[2] - a.d[2] * b.d[1];
   retval.d[1] = a.d[2] * b.d[0] - a.d[0] * b.d[2];
   retval.d[2] = a.d[0] * b.d[1] - a.d[1] * b.d[0];
   return retval;
}

/* Scale a 3-vector by scalar s (operates on a copy). */
static inline vec3 scalev3( f_ s, vec3 i){i.d[0] *= s; i.d[1] *= s; i.d[2] *= s; return i;}

/* Scale a 4-vector by scalar s (operates on a copy). */
static inline vec4 scalev4( f_ s, vec4 i){i.d[0] *= s; i.d[1] *= s; i.d[2] *= s;i.d[3] *= s; return i;}

/* Unit-length copy of a; the zero vector maps to +z instead of NaN. */
static inline vec3 normalizev3( vec3 a){
   if(lengthv3(a)==0) return (vec3){.d[0]=0.0,.d[1]=0.0,.d[2]=1.0};
   return scalev3(1.0/lengthv3(a), a);
}

/* Unit-length copy of a; the zero vector maps to (0,0,1,0) instead of NaN. */
static inline vec4 normalizev4( vec4 a){
   if(lengthv4(a)==0) return (vec4){.d[0]=0.0,.d[1]=0.0,.d[2]=1.0,.d[3]=0.0};
   return scalev4(1.0/lengthv4(a), a);
}

/* Component-wise sum of two 3-vectors. */
static inline vec3 addv3( vec3 aa, vec3 b){
   vec3 a = aa;
   a.d[0] += b.d[0];
   a.d[1] += b.d[1];
   a.d[2] += b.d[2];
   return a;
}

/* Rotate `in` about `axis` by `ang` radians, via Rodrigues' formula
   v*cos(t) + (k x v)*sin(t) + k*(k . v)*(1 - cos(t)).
   NOTE(review): the formula is only valid when `axis` is unit length —
   callers must normalize first; confirm call sites. */
static inline vec3 rotatev3( vec3 in, vec3 axis, f_ ang){
   vec3 t1 = scalev3(cosf(ang),in);
   vec3 t2 = scalev3(sinf(ang),crossv3(axis,in));
   vec3 t3 = scalev3((1-cosf(ang))*dotv3(axis,in),axis);
   return addv3(t1,addv3(t2,t3));
}

/* Component-wise sum of two 4-vectors. */
static inline vec4 addv4( vec4 aa, vec4 b){
   vec4 a = aa;
   a.d[0] += b.d[0];
   a.d[1] += b.d[1];
   a.d[2] += b.d[2];
   a.d[3] += b.d[3];
   return a;
}

/* a - b, expressed as a + (-1)*b. */
static inline vec3 subv3( vec3 a, vec3 b){
   return addv3(a,scalev3(-1,b));
}

/* 4x4 identity matrix (diagonal of ones via scalemat4). */
static inline mat4 identitymat4(){
   return scalemat4(
      (vec4){.d[0]=1.0,.d[1]=1.0,.d[2]=1.0,.d[3]=1.0}
   );
}

/* Column-major translation matrix: translation lands in the fourth column. */
static inline mat4 translate( vec3 t){
   mat4 tm = identitymat4();
   tm.d[3*4+0] = t.d[0];
   tm.d[3*4+1] = t.d[1];
   tm.d[3*4+2] = t.d[2];
   return tm;
}

/* a - b, expressed as a + (-1)*b. */
static inline vec4 subv4( vec4 a, vec4 b){
   return addv4(a,scalev4(-1,b));
}

/* Reflect `in` across the plane with normal `norm`: r = v - 2(n.v)n.
   NOTE(review): assumes `norm` is unit length — confirm call sites. */
static inline vec3 reflect( vec3 in, vec3 norm){
   return addv3(in, scalev3(-2.0*dotv3(norm, in), norm ) );
}

/* Promote a 3-vector to homogeneous coordinates with w component `w`. */
static inline vec4 upv3( vec3 in, f_ w){
   return (vec4){ .d[0]=in.d[0], .d[1]=in.d[1], .d[2]=in.d[2], .d[3]=w };
}

/* Drop the w component of a 4-vector. */
static inline vec3 downv4( vec4 in){
   return (vec3){ .d[0]=in.d[0], .d[1]=in.d[1], .d[2]=in.d[2] };
}

/* Build a right-handed view matrix looking from `eye` toward `at`. */
static inline mat4 lookAt( vec3
eye, vec3 at, vec3 up){ mat4 cw = identitymat4(); vec3 zaxis = normalizev3(subv3(at,eye)); vec3 xaxis = normalizev3(crossv3(zaxis,up)); vec3 yaxis = crossv3(xaxis, zaxis); zaxis = scalev3(-1,zaxis); cw.d[0*4+0] = xaxis.d[0]; cw.d[1*4+0] = xaxis.d[1]; cw.d[2*4+0] = xaxis.d[2]; cw.d[3*4+0] = -dotv3(xaxis,eye); cw.d[0*4+1] = yaxis.d[0]; cw.d[1*4+1] = yaxis.d[1]; cw.d[2*4+1] = yaxis.d[2]; cw.d[3*4+1] = -dotv3(yaxis,eye); cw.d[0*4+2] = zaxis.d[0]; cw.d[1*4+2] = zaxis.d[1]; cw.d[2*4+2] = zaxis.d[2]; cw.d[3*4+2] = -dotv3(zaxis,eye); cw.d[0*4+3] = 0; cw.d[1*4+3] = 0; cw.d[2*4+3] = 0; cw.d[3*4+3] = 1; return cw; } /* Collision detection These Algorithms return the penetration vector into the shape in the first argument With depth of penetration in element 4 if depth of penetration is zero or lower then there is no penetration. */ static inline vec4 spherevsphere( vec4 s1, vec4 s2){ vec4 ret; vec3 diff = subv3( downv4(s2), downv4(s1) ); f_ lv3 = lengthv3(diff); f_ l = (s1.d[3] + s2.d[3]-lv3); if(l < 0 || lv3 == 0) { ret.d[3] = 0;return ret; } ret = upv3( scalev3( l/lv3,diff ) ,l ); return ret; } static inline int boxvboxbool (aabb b1, aabb b2){ vec3 sumextents = addv3(b1.e,b2.e); vec3 b1c = downv4(b1.c); vec3 b2c = downv4(b2.c); if( !( (fabs(b1c.d[0] - b2c.d[0]) <= sumextents.d[0]) && (fabs(b1c.d[1] - b2c.d[1]) <= sumextents.d[1]) && (fabs(b1c.d[2] - b2c.d[2]) <= sumextents.d[2]) ) ){ return 0; } return 1; } static inline vec4 boxvbox( aabb b1, aabb b2){ /*Just points along the minimum separating axis, Nothing fancy.*/ vec4 ret = (vec4){ .d[0]=0, .d[1]=0, .d[2]=0, .d[3]=0 }; vec3 sumextents = addv3(b1.e,b2.e); vec3 b1c = downv4(b1.c); vec3 b2c = downv4(b2.c); vec3 b1min = subv3(b1c,b1.e); vec3 b2min = subv3(b2c,b2.e); vec3 b1max = addv3(b1c,b1.e); vec3 b2max = addv3(b2c,b2.e); if( !( (fabs(b1c.d[0] - b2c.d[0]) <= sumextents.d[0]) && (fabs(b1c.d[1] - b2c.d[1]) <= sumextents.d[1]) && (fabs(b1c.d[2] - b2c.d[2]) <= sumextents.d[2]) ) ){ return ret; } vec3 axispen[2]; axispen[0] 
= subv3(b1max,b2min); axispen[1] = subv3(b1min,b2max); ret.d[3] = fabs(axispen[0].d[0]); ret.d[0] = axispen[0].d[0]; for(int i = 1; i < 6; i++){ if(fabs(axispen[i/3].d[i%3]) < fabs(ret.d[3])){ ret = (vec4){ .d[0]=0, .d[1]=0, .d[2]=0, .d[3]=(axispen[i/3].d[i%3]) }; ret.d[i%3] = ret.d[3]; ret.d[3] = fabs(ret.d[3]); } } return ret; } static inline vec3 closestpointAABB( aabb b, vec3 p){ vec3 b1min = subv3(downv4(b.c),b.e); vec3 b1max = addv3(downv4(b.c),b.e); return clampvec3(p,b1min,b1max); } static inline vec4 spherevaabb( vec4 sph, aabb box){ vec4 ret; vec3 p = closestpointAABB(box,downv4(sph)); vec3 v = subv3(p,downv4(sph)); f_ d2 = dotv3(v,v); if(d2 <= sph.d[3] * sph.d[3]){ f_ len = lengthv3(v); f_ diff = (sph.d[3] - len); if(len > 0){ f_ factor = diff/len; vec3 bruh = scalev3(factor, v); ret = upv3(bruh, diff); return ret; } else { aabb virt; virt.c = sph; virt.e.d[0] = sph.d[3]; virt.e.d[1] = sph.d[3]; virt.e.d[2] = sph.d[3]; return boxvbox(virt,box); } } else return (vec4){ .d[0]=0, .d[1]=0, .d[2]=0, .d[3]=0 }; } /*END Math_Library.h~~~~~~~~~~~~~~~~~~~~*/ #endif
DenseLayer.c
/* * DenseLayer.c * Francesco Conti <f.conti@unibo.it> * * Copyright (C) 2015 ETH Zurich, University of Bologna * All rights reserved. * * This software may be modified and distributed under the terms * of the BSD license. See the LICENSE file for details. */ #include "linalg.h" #include "tiling.h" #include "DenseLayer.h" #ifdef CCN_TILING_LESSTIME #define _dense_tiling_init(); \ unsigned char (*tile_grid_non)[layer->ntile_nin] = (unsigned char (*)[layer->ntile_nin]) layer->tile_grid_non; \ unsigned char (*tile_grid_nin)[layer->ntile_nin] = (unsigned char (*)[layer->ntile_nin]) layer->tile_grid_nin; \ int _non = tile_grid_non[aa][bb]; \ int _nin = tile_grid_nin[aa][bb]; #else /* ~CCN_TILING_LESSTIME */ #define _dense_tiling_init(); \ int _non = (aa < layer->ntile_full_non) ? layer->tiling_max_non : layer->tlast_non; \ int _nin = (bb < layer->ntile_full_nin) ? layer->tiling_max_nin : layer->tlast_nin; #endif /* ~CCN_TILING_LESSTIME */ #define _dense_notiling_init(); \ int _non = layer->n_out_neurons; \ int _nin = layer->n_in_neurons; /** * Allocates a new DenseLayer data structure and its fields (weight, bias, * output feature maps). * * @return a pointer to the new DenseLayer data structure. * * @param n_in_neurons * the number of input feature maps. * @param n_out_neurons * the number of output feature maps. * @param input_height * the height of the input feature maps. * @param input_width * the width of the input feature maps. * @param output_height * the height of the output feature maps. * @param output_width * the width of the output feature maps. * @param activation * 1 if activation is tanh, 0 if no activation. * @param *x * a *mandatory* pointer to the input feature maps. * @param *y * an *optional* pointer to the already-allocated output feature maps. If * NULL, DenseLayer_new() will allocate y automatically. 
*/ DenseLayer *DenseLayer_new( #ifdef CCN_NOALLOC DenseLayer *layer, #endif /* CCN_NOALLOC */ const char *name, data_t *w, data_t *b, data_t *x, data_t *y, data_t *loc_x0, data_t *loc_x1, data_t *loc_y0, data_t *loc_y1, data_t *loc_y2, data_t *loc_w0, data_t *loc_w1, data_t *loc_b, int n_out_neurons, int n_in_neurons, int activation, int tiling_max_non, int tiling_max_nin, unsigned qf ) { #ifndef CCN_NOALLOC // build DenseLayer DenseLayer *layer; layer = ccn_malloc(sizeof(DenseLayer)); #endif /* ifndef CCN_NOALLOC */ layer->name = name; layer->n_in_neurons = n_in_neurons; layer->n_out_neurons = n_out_neurons; layer->activation = activation; layer->w = w; layer->b = b; layer->x = x; layer->y = y; layer->qf = qf; #ifndef CCN_CACHE layer->loc_x0 = loc_x0; layer->loc_y0 = loc_y0; layer->loc_x1 = loc_x1; layer->loc_y1 = loc_y1; layer->loc_y2 = loc_y2; layer->loc_w0 = loc_w0; layer->loc_w1 = loc_w1; layer->loc_b = loc_b; #endif /* ifndef CCN_CACHE */ layer->tiling_max_non = tiling_max_non; layer->tiling_max_nin = tiling_max_nin; #ifdef CCN_TILING // define and record the number of tiles int ntile_non = (n_out_neurons % tiling_max_non ) ? n_out_neurons / tiling_max_non + 1 : n_out_neurons / tiling_max_non; int ntile_nin = (n_in_neurons % tiling_max_nin ) ? 
n_in_neurons / tiling_max_nin + 1 : n_in_neurons / tiling_max_nin; layer->ntile_non = ntile_non; layer->ntile_nin = ntile_nin; #ifdef CCN_TILING_LESSMEM layer->tlast_non = n_out_neurons % tiling_max_non; layer->tlast_nin = n_in_neurons % tiling_max_nin; layer->ntile_full_non = ntile_non; layer->ntile_full_nin = ntile_nin; #else /* ~CCN_TILING_LESSMEM */ // allocate the tile grid in a flat fashion layer->tile_grid_non = ccn_malloc(sizeof(unsigned char)*(ntile_non+NB_PIPE_STAGE-1)*ntile_nin); layer->tile_grid_nin = ccn_malloc(sizeof(unsigned char)*(ntile_non+NB_PIPE_STAGE-1)*ntile_nin); // cast the tile grid to a 4-dimensional array unsigned char (*tile_grid_non)[ntile_nin] = layer->tile_grid_non; unsigned char (*tile_grid_nin)[ntile_nin] = layer->tile_grid_nin; #endif /* ~CCN_TILING_LESSMEM */ // fill in the tile grid int aa, bb; for(aa=0; aa<layer->ntile_non; aa++) { for(bb=0; bb<layer->ntile_nin; bb++) { #ifdef CCN_TILING_LESSTIME if(bb*tiling_max_nin > n_in_neurons-tiling_max_nin) { tile_grid_nin[aa][bb] = (unsigned char) n_in_neurons % tiling_max_nin; } else { tile_grid_nin[aa][bb] = (unsigned char) tiling_max_nin; } if(aa*tiling_max_non > n_out_neurons-tiling_max_non) { tile_grid_non[aa][bb] = (unsigned char) n_out_neurons % tiling_max_non; } else { tile_grid_non[aa][bb] = (unsigned char) tiling_max_non; } #else /* ~CCN_TILING_LESSTIME */ if(bb*tiling_max_nin > n_in_neurons-tiling_max_nin) { layer->ntile_full_nin = bb; } if(aa*tiling_max_non > n_out_neurons-tiling_max_non) { layer->ntile_full_non = aa; } #endif /* ~CCN_TILING_LESSTIME */ } } #ifdef CCN_TILING_LESSTIME for(aa=layer->ntile_non; aa<layer->ntile_non+NB_PIPE_STAGE-1; aa++) { for(bb=0; bb<layer->ntile_nin; bb++) { tile_grid_nin[aa][bb] = tiling_max_nin; tile_grid_non[aa][bb] = tiling_max_non; } } #endif /* CCN_TILING_LESSTIME */ #else /* ~CCN_TILING */ // no tile grid int ntile_non = n_out_neurons; int ntile_nin = n_in_neurons; layer->ntile_non = ntile_non; layer->ntile_nin = ntile_nin; #endif /* 
~CCN_TILING */ #ifdef TILING_DEBUG printf("[DenseLayer %s] NOn grid:\n", layer->name); for(aa=0; aa<layer->ntile_non; aa++) { printf(" "); for(bb=0; bb<layer->ntile_nin; bb++) { printf("%d ", tile_grid_non[aa][bb]); } printf("\n"); } printf("[DenseLayer %s] NIn grid:\n", layer->name); for(aa=0; aa<layer->ntile_non; aa++) { printf(" "); for(bb=0; bb<layer->ntile_nin; bb++) { printf("%d ", tile_grid_nin[aa][bb]); } printf("\n"); } #endif /* TILING_DEBUG */ return layer; } void DenseLayer_delete( DenseLayer *layer ) { #ifndef CCN_CACHE ccn_free(layer->loc_w0); ccn_free(layer->loc_w1); ccn_free(layer->loc_b); #endif /* ~CCN_CACHE */ #ifdef CCN_TILING ccn_free(layer->tile_grid_non); ccn_free(layer->tile_grid_nin); #endif /* ~CCN_TILING */ ccn_free(layer); } static void DenseLayer_pipe_fe( DenseLayer *layer, int aa, int bb ) { #ifdef CCN_CACHE return; #endif // if aa is -1, it means that this is the last tile (and bb, ii, jj also = -1) if(aa==-1) return; #ifdef FETCH_PROFILE perf_enable_all(); perf_reset(); perf_start(); #endif /* FETCH_PROFILE */ { _dense_tiling_init() data_t *l2_x = ccn_get_tile_1d( layer->x, bb, layer->tiling_max_nin ); data_t *l2_y = ccn_get_tile_1d( layer->y, aa, layer->tiling_max_non ); data_t *l2_W = ccn_get_tile_2d( layer->w, bb, aa, layer->tiling_max_nin, layer->tiling_max_non, layer->n_out_neurons ); // X tile copy-in ccn_memcpy_async( layer->loc_x_fe, // pointers l2_x, _nin*sizeof(data_t) ); // W copy-in (check misalignment) ccn_memcpy_async_2d( layer->loc_w_fe, // pointers l2_W, _nin, // sizes _non*sizeof(data_t), _non*sizeof(data_t), // local strides layer->n_out_neurons*sizeof(data_t) // remote strides ); // b copy-in if(bb==0) { ccn_memcpy_async( layer->loc_b, &layer->b[aa*layer->tiling_max_non], _non*sizeof(data_t) ); } #ifdef FETCH_CHECKSUM int32_t sum_x = 0; int32_t sum_W = 0; int32_t sum_y = 0; for(int i=0; i<_nin; i++) { sum_x += layer->loc_x_fe[i]; } for(int i=0; i<_non*_nin; i++) { sum_W += layer->loc_w_fe[i]; } for(int i=0; i<_non; 
i++) { sum_y += layer->loc_y_fe[i]; } printf("[DenseLayer %s] Fetch checksum %d,%d: x=%d W=%d y=%d\n", layer->name, aa, bb, sum_x, sum_W, sum_y); #endif /* FETCH_CHECKSUM */ } #ifdef FETCH_PROFILE perf_stop(); int t0 = perf_get_cycles(); printf("[DenseLayer %s] Fetch profiling: %d\n", layer->name, t0); #endif /* FETCH_PROFILE */ } static void DenseLayer_pipe_ex( DenseLayer *layer, int aa, int bb ) { // if aa is -1, it means that this is the first tile (and bb, ii, jj also = -1) if(aa==-1) return; #ifdef EXECUTE_PROFILE perf_enable_all(); perf_reset(); perf_start(); #endif /* EXECUTE_PROFILE */ // #pragma omp single nowait { #ifdef INTERM_CHECKSUM int print_flag = 0; #endif #ifdef CCN_TILING _dense_tiling_init() #else /* ~CCN_TILING */ _dense_notiling_init() #endif /* ~CCN_TILING */ #ifndef CCN_CACHE data_t *_x = layer->loc_x_ex; data_t *_y = layer->loc_y_ex; data_t *_W = layer->loc_w_ex; data_t *_b = layer->loc_b; #ifndef CCN_DOUBLEBUF // wait for the end of the fetch stage if not doing double buffering // ccn_memcpy_wait(); // #pragma omp barrier #endif /* ~CCN_DOUBLEBUF */ #else /* CCN_CACHE */ data_t *_x = ccn_get_tile_1d( layer->x, bb, layer->tiling_max_nin ); data_t *_y = ccn_get_tile_1d( layer->y, aa, layer->tiling_max_non ); data_t *_W = ccn_get_tile_2d( layer->w, aa, bb, layer->tiling_max_non, layer->tiling_max_nin, layer->n_in_neurons*layer->n_out_neurons ); #endif /* CCN_CACHE */ // biasing y if(bb==0) { for(int a=0; a<_non; a++) { _y[a] = _b[a]; } } // matrix x vector product linalg_mvprod(_W, 0, _x, _y, _nin, _non, layer->qf); // plp_matmul_i16(_W, _x, _y, _nin, _non, 1); // if(bb == layer->ntile_nin-1) { // printf("EX DEB %d %d\n", aa, bb); // for(int a=0; a<_non; a++) { // char *s = fixed2string(_y[a], 13, 5); // printf(" %d: %04x %s\n", a, _y[a], s); // free(s); // } // } // activation if(layer->activation == ACTIVATION_TANH) { for(int a=0; a<_non; a++) { _y[a] = ccn_tanh(_y[a]); } } else if(layer->activation == ACTIVATION_RELU) { for(int a=0; 
a<_non; a++) { _y[a] = (_y[a] < 0) ? 0 : _y[a]; } } #ifdef TILE_CHECKSUM { int i, sum=0; printf("[DenseLayer %s] Tile checksum %d,%d: ", layer->name, aa,bb); sum=0; for(i=0; i<_nin; i++){ sum+=_x[i]; } printf("xsum=%d, ", sum); sum=0; for(i=0; i<_non*_nin; i++) { sum+=_W[i]; } printf("wsum=%d, ", sum); sum=0; for(i=0; i<_non; i++) { sum+=_y[i]; } printf("ysum=%d\n", sum); printf(" xptr=%08x, wptr=%08x, yptr=%08x\n", _x, _W, _y); } #endif } #ifdef EXECUTE_PROFILE perf_stop(); int t0 = perf_get_cycles(); printf("[DenseLayer %s] Execute profiling: %d\n", layer->name, t0); #endif /* EXECUTE_PROFILE */ } static void DenseLayer_pipe_wb( DenseLayer *layer, int aa, int bb ) { #ifdef CCN_CACHE return; #endif // if aa is -1, it means that this is the first tile (and bb, ii, jj also = -1) if(aa==-1) return; #ifdef WRITEBACK_PROFILE perf_enable_all(); perf_reset(); perf_start(); #endif /* WRITEBACK_PROFILE */ // #pragma omp single { _dense_tiling_init(); data_t *l2_y = ccn_get_tile_1d( layer->y, aa, layer->tiling_max_non ); #ifdef WRITEBACK_CHECKSUM int32_t sum = 0; for(int i=0; i<_non; i++) { sum += layer->loc_y_wb[i]; } printf("[DenseLayer %s] Writeback checksum %d,%d: %d\n", layer->name, aa, bb, sum); #endif /* WRITEBACK_CHECKSUM */ #ifdef WRITEBACK_DEBUG printf("[DenseLayer %s] Writeback debug %d,%d:\n", layer->name, aa, bb); for(int i=0; i<_non; i++) { printf(" (%d): %04x\n", i, layer->loc_y_wb[i] & 0xffff); } #endif /* WRITEBACK_DEBUG */ // Y tile copy-out if(bb == layer->ntile_nin-1) { ccn_memcpy_async(// l2_y, // pointers layer->loc_y_wb, _non*sizeof(data_t) ); } } #ifdef WRITEBACK_DEBUG #pragma omp barrier #endif #ifdef WRITEBACK_PROFILE perf_stop(); int t0 = perf_get_cycles(); printf("[DenseLayer %s] Writeback profiling: %d\n", layer->name, t0); #endif /* WRITEBACK_PROFILE */ } /** * Executes the given DenseLayer, i.e. computes its outputs given the inputs * defined in the data structure. 
* The DenseLayer computes the output of a densely connected neural network * layer with 3d inputs and outputs (an array of 2d feature maps). * * @param *layer * a pointer to the DenseLayer data structure to execute. */ void DenseLayer_exec(DenseLayer *layer) { // DenseLayer_exec is now organized as a pipeline with the following stages // fetch (fe) : DMA in of a tile // execute (ex) : execution of layer // write-back (wb) : DMA out of a tile // all indeces have a fetch, execute and write-back version int aa_pipe,bb_pipe; int aa_fe = -1, bb_fe = -1; int aa_ex = -1, bb_ex = -1; int aa_wb = -1, bb_wb = -1; #ifdef CCN_DOUBLEBUF // initialize double buffering in a known state int doublebuf_state_x_fe = 0; int doublebuf_state_y_fe = 0; int doublebuf_state_y_wb = 0; #endif /* CCN_DOUBLEBUF */ #ifndef CCN_CACHE // initialize state of fe local buffer pointers layer->loc_x_fe = layer->loc_x0; layer->loc_w_fe = layer->loc_w0; layer->loc_y_fe = layer->loc_y0; #endif /* ~CCN_CACHE */ // reset the weights! 
memset(layer->loc_w0, 0, sizeof(data_t)*layer->tiling_max_non*layer->tiling_max_nin); memset(layer->loc_w1, 0, sizeof(data_t)*layer->tiling_max_non*layer->tiling_max_nin); #ifdef CCN_TILING for(aa_pipe=0; aa_pipe<layer->ntile_non+NB_PIPE_STAGE-1; aa_pipe++) { for(bb_pipe=0; bb_pipe<layer->ntile_nin; bb_pipe++) { // update state of fe indeces if(bb_pipe<layer->ntile_nin) { bb_fe = bb_pipe; aa_fe = aa_pipe; } else { bb_fe = -1; aa_fe = -1; } #ifndef CCN_CACHE #ifdef CCN_DOUBLEBUF // update state of fe local buffer pointers if (doublebuf_state_x_fe == 0) { layer->loc_x_fe = layer->loc_x0; } else { layer->loc_x_fe = layer->loc_x1; } if (doublebuf_state_x_fe == 0) { layer->loc_w_fe = layer->loc_w0; } else { layer->loc_w_fe = layer->loc_w1; } if (doublebuf_state_y_fe == 0) { layer->loc_y_fe = layer->loc_y0; } else if (doublebuf_state_y_fe == 1) { layer->loc_y_fe = layer->loc_y1; } else { layer->loc_y_fe = layer->loc_y2; } #endif /* CCN_DOUBLEBUF */ #endif /* ~CCN_CACHE */ #ifdef PIPE_DEBUG printf("[DenseLayer %s pipe] aa=%d bb=%d\n", layer->name, aa_pipe, bb_pipe); printf(" fe: aa=%d bb=%d\n", aa_fe, bb_fe); printf(" ex: aa=%d bb=%d\n", aa_ex, bb_ex); printf(" wb: aa=%d bb=%d\n", aa_wb, bb_wb); printf(" doublebuf states: %d %d %d\n", doublebuf_state_x_fe, doublebuf_state_y_fe, doublebuf_state_y_wb); printf("\n"); #endif PIPE_DEBUG #ifdef PIPE_PROFILE reset_timer(); start_timer(); #endif /* PIPE_PROFILE */ // #ifndef DISABLE_OPENMP // #pragma omp parallel num_threads(3) // #endif { // fetch stage // #ifndef DISABLE_OPENMP // if(omp_get_thread_num() == THREAD_FE) // #endif DenseLayer_pipe_fe(layer, aa_fe, bb_fe); // execute stage // #ifndef DISABLE_OPENMP // if(omp_get_thread_num() == THREAD_EX) // #endif DenseLayer_pipe_ex(layer, aa_ex, bb_ex); // write-back stage // #ifndef DISABLE_OPENMP // if(omp_get_thread_num() == THREAD_WB) // #endif DenseLayer_pipe_wb(layer, aa_wb, bb_wb); } #ifdef PIPE_PROFILE stop_timer(); int t0 = get_time(); reset_timer(); printf("[DenseLayer 
%s] Pipe profiling: %d\n", layer->name, t0); #endif /* PIPE_PROFILE */ // update state of ex,wb indeces bb_wb = bb_ex; bb_ex = bb_fe; aa_wb = aa_ex; aa_ex = aa_fe; // update state of ex,wb local buffers layer->loc_x_ex = layer->loc_x_fe; layer->loc_w_ex = layer->loc_w_fe; layer->loc_y_wb = layer->loc_y_ex; layer->loc_y_ex = layer->loc_y_fe; #ifndef CCN_CACHE #ifdef CCN_DOUBLEBUF // switch double buffering state if (doublebuf_state_x_fe == 0) { doublebuf_state_x_fe = 1; } else { doublebuf_state_x_fe = 0; } if (bb_pipe==layer->ntile_nin-1 && doublebuf_state_y_fe == 0) { doublebuf_state_y_fe = 1; } else if (bb_pipe==layer->ntile_nin-1 && doublebuf_state_y_fe == 1) { doublebuf_state_y_fe = 0; } if (bb_pipe==layer->ntile_nin-1 && doublebuf_state_y_wb == 0) { doublebuf_state_y_wb = 1; } else if (bb_pipe==layer->ntile_nin-1 && doublebuf_state_y_wb == 1) { doublebuf_state_y_wb = 0; } #endif /* CCN_DOUBLEBUF */ #endif /* ~CCN_CACHE */ } } #else /* ~CCN_TILING */ // fetch stage DenseLayer_pipe_fe(layer, 0, 0); // execute stage DenseLayer_pipe_ex(layer, 0, 0); // write-back stage DenseLayer_pipe_wb(layer, 0, 0); #endif /* CCN_TILING */ }
CycladesTrainer.h
/*
* Copyright 2016 [See AUTHORS file for list of authors]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*    http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef _CYCLADES_TRAINER_
#define _CYCLADES_TRAINER_

// Trainer that runs the CYCLADES schedule: datapoints are partitioned into
// conflict-free batches (one slice per thread), then all threads process the
// same batch concurrently, with an OpenMP barrier between batches.
class CycladesTrainer : public Trainer {
 private:
    // Dump every batch/thread partition to stdout, printing each datapoint's
    // order id. Debug aid only; not called from Train() itself.
    void DebugPrintPartitions(DatapointPartitions &p) {
	for (int i = 0; i < p.NumBatches(); i++) {
	    std::cout << "Batch " << i << std::endl;
	    for (int j = 0; j < FLAGS_n_threads; j++) {
		std::cout << "Thread " << j << ": ";
		for (int k = 0; k < p.NumDatapointsInBatch(j, i); k++) {
		    if (k != 0) std::cout << " ";
		    std::cout << p.GetDatapoint(j, i, k)->GetOrder();
		}
		std::cout << std::endl;
	    }
	}
    }

 public:
    CycladesTrainer() {
    }

    ~CycladesTrainer() {
    }

    // Run FLAGS_n_epochs epochs of training over `datapoints` on `model`
    // using `updater`, and return per-epoch statistics gathered via
    // EpochBegin(). Partitioning is done once, up front.
    TrainStatistics Train(Model *model, const std::vector<Datapoint *> & datapoints, Updater *updater) override {
	// Partitions.
	CycladesPartitioner partitioner(model);
	Timer partition_timer;
	DatapointPartitions partitions = partitioner.Partition(datapoints, FLAGS_n_threads);
	if (FLAGS_print_partition_time) {
	    this->PrintPartitionTime(partition_timer);
	}

	// Let the model/updater size any per-partition scratch state.
	model->SetUpWithPartitions(partitions);
	updater->SetUpWithPartitions(partitions);

	// Default batch ordering: sequential 0..NumBatches()-1.
	std::vector<int> batch_ordering(partitions.NumBatches());
	for (int i = 0; i < partitions.NumBatches(); i++) {
	    batch_ordering[i] = i;
	}

	// Default datapoint processing ordering, indexed
	// [thread][batch][index]: sequential within each batch slice.
	std::vector<std::vector<std::vector<int> > > per_batch_datapoint_order(FLAGS_n_threads);
	for (int thread = 0; thread < FLAGS_n_threads; thread++) {
	    per_batch_datapoint_order[thread].resize(partitions.NumBatches());
	    for (int batch = 0; batch < partitions.NumBatches(); batch++) {
		per_batch_datapoint_order[thread][batch].resize(partitions.NumDatapointsInBatch(thread, batch));
		for (int index = 0; index < partitions.NumDatapointsInBatch(thread, batch); index++) {
		    per_batch_datapoint_order[thread][batch][index] = index;
		}
	    }
	}

	// Keep track of statistics of training.
	TrainStatistics stats;

	// Train.
	Timer gradient_timer;
	for (int epoch = 0; epoch < FLAGS_n_epochs; epoch++) {

	    this->EpochBegin(epoch, gradient_timer, model, datapoints, &stats);

	    // Random batch ordering generation.
	    // NOTE(review): rand() % NumBatches() samples WITH replacement —
	    // some batches can be visited multiple times per epoch and others
	    // skipped; confirm this is intended rather than a permutation.
	    if (FLAGS_random_batch_processing) {
		for (int i = 0; i < partitions.NumBatches(); i++) {
		    batch_ordering[i] = rand() % partitions.NumBatches();
		}
	    }

	    // Random per batch datapoint processing.
	    // NOTE(review): also sampled with replacement, same caveat.
	    if (FLAGS_random_per_batch_datapoint_processing) {
		for (int thread = 0; thread < FLAGS_n_threads; thread++) {
		    for (int batch = 0; batch < partitions.NumBatches(); batch++) {
			for (int index = 0; index < partitions.NumDatapointsInBatch(thread, batch); index++) {
			    per_batch_datapoint_order[thread][batch][index] = rand() % partitions.NumDatapointsInBatch(thread, batch);
			}
		    }
		}
	    }

	    updater->EpochBegin();

	    // All threads walk the batches in the same order; the barrier at
	    // the top of each iteration keeps them on the same batch so that
	    // only conflict-free updates run concurrently.
#pragma omp parallel num_threads(FLAGS_n_threads)
	    {
		int thread = omp_get_thread_num();
		for (int batch_count = 0; batch_count < partitions.NumBatches(); batch_count++) {
		    int batch = batch_ordering[batch_count];

#pragma omp barrier
		    for (int index_count = 0; index_count < partitions.NumDatapointsInBatch(thread, batch); index_count++) {
			int index = per_batch_datapoint_order[thread][batch][index_count];
			updater->Update(model, partitions.GetDatapoint(thread, batch, index));
		    }
		}
	    }

	    updater->EpochFinish();
	}
	return stats;
    }
};

#endif
// data.h
/*!
 * Copyright (c) 2015 by Contributors
 * \file data.h
 * \brief The input data structure of xgboost.
 * \author Tianqi Chen
 */
#ifndef XGBOOST_DATA_H_
#define XGBOOST_DATA_H_

#include <dmlc/base.h>
#include <dmlc/data.h>
#include <rabit/rabit.h>
#include <xgboost/base.h>
#include <xgboost/span.h>
#include <xgboost/host_device_vector.h>

#include <memory>
#include <numeric>
#include <algorithm>
#include <string>
#include <utility>
#include <vector>

namespace xgboost {
// forward declare dmatrix.
class DMatrix;

/*! \brief data type accepted by xgboost interface */
enum class DataType : uint8_t {
  kFloat32 = 1,
  kDouble = 2,
  kUInt32 = 3,
  kUInt64 = 4
};

/*!
 * \brief Meta information about dataset, always sit in memory.
 */
class MetaInfo {
 public:
  /*! \brief number of data fields in MetaInfo
   * (num_row_, num_col_, num_nonzero_, labels_, group_ptr_, weights_,
   * base_margin_) */
  static constexpr uint64_t kNumField = 7;
  /*! \brief number of rows in the data */
  uint64_t num_row_{0};
  /*! \brief number of columns in the data */
  uint64_t num_col_{0};
  /*! \brief number of nonzero entries in the data */
  uint64_t num_nonzero_{0};
  /*! \brief label of each instance */
  HostDeviceVector<bst_float> labels_;
  /*!
   * \brief the index of begin and end of a group
   * needed when the learning task is ranking.
   */
  std::vector<bst_group_t> group_ptr_;
  /*! \brief weights of each instance, optional */
  HostDeviceVector<bst_float> weights_;
  /*!
   * \brief initialized margins,
   * if specified, xgboost will start from this init margin
   * can be used to specify initial prediction to boost from.
   */
  HostDeviceVector<bst_float> base_margin_;

  /*! \brief default constructor */
  MetaInfo() = default;

  /*!
   * \brief Deep-copy assignment: HostDeviceVector members are explicitly
   * resized and copied rather than assigned.
   * NOTE(review): label_order_cache_ is neither copied nor cleared here; if
   * the destination already holds a cache whose size equals the new
   * labels_.Size(), LabelAbsSort() will return the stale ordering -- verify.
   */
  MetaInfo& operator=(MetaInfo const& that) {
    this->num_row_ = that.num_row_;
    this->num_col_ = that.num_col_;
    this->num_nonzero_ = that.num_nonzero_;
    this->labels_.Resize(that.labels_.Size());
    this->labels_.Copy(that.labels_);
    this->group_ptr_ = that.group_ptr_;
    this->weights_.Resize(that.weights_.Size());
    this->weights_.Copy(that.weights_);
    this->base_margin_.Resize(that.base_margin_.Size());
    this->base_margin_.Copy(that.base_margin_);
    return *this;
  }

  /*!
   * \brief Get weight of each instances.
   * \param i Instance index.
   * \return The weight.  Defaults to 1.0f when no weights were provided.
   */
  inline bst_float GetWeight(size_t i) const {
    return weights_.Size() != 0 ? weights_.HostVector()[i] : 1.0f;
  }

  /*! \brief get sorted indexes (argsort) of labels by absolute value (used by cox loss)
   *
   * The result is computed lazily and cached.  NOTE(review): cache validity
   * is keyed only on size -- this assumes labels_ is not mutated in place
   * while its size stays the same; confirm callers respect that.
   */
  inline const std::vector<size_t>& LabelAbsSort() const {
    if (label_order_cache_.size() == labels_.Size()) {
      return label_order_cache_;
    }
    label_order_cache_.resize(labels_.Size());
    std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0);
    const auto& l = labels_.HostVector();
    XGBOOST_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(),
                          [&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);});
    return label_order_cache_;
  }
  /*! \brief clear all the information */
  void Clear();
  /*!
   * \brief Load the Meta info from binary stream.
   * \param fi The input stream
   */
  void LoadBinary(dmlc::Stream* fi);
  /*!
   * \brief Save the Meta info to binary stream
   * \param fo The output stream.
   */
  void SaveBinary(dmlc::Stream* fo) const;
  /*!
   * \brief Set information in the meta info.
   * \param key The key of the information.
   * \param dptr The data pointer of the source array.
   * \param dtype The type of the source data.
   * \param num Number of elements in the source array.
   */
  void SetInfo(const char* key, const void* dptr, DataType dtype, size_t num);
  /*!
   * \brief Set information in the meta info with array interface.
   * \param key The key of the information.
   * \param interface_str String representation of json format array interface.
   *
   *   [ column_0, column_1, ... column_n ]
   *
   * Right now only 1 column is permitted.
   */
  void SetInfo(const char* key, std::string const& interface_str);

 private:
  /*! \brief argsort of labels (lazily built by LabelAbsSort) */
  mutable std::vector<size_t> label_order_cache_;
};

/*! \brief Element from a sparse vector */
struct Entry {
  /*! \brief feature index */
  bst_feature_t index;
  /*! \brief feature value */
  bst_float fvalue;
  /*! \brief default constructor */
  Entry() = default;
  /*!
   * \brief constructor with index and value
   * \param index The feature or row index.
   * \param fvalue The feature value.
   */
  XGBOOST_DEVICE Entry(bst_feature_t index, bst_float fvalue) : index(index), fvalue(fvalue) {}
  /*! \brief compare feature values in ascending order
   * (the previous comment said "reversely", but the comparison is `<`) */
  inline static bool CmpValue(const Entry& a, const Entry& b) {
    return a.fvalue < b.fvalue;
  }
  inline bool operator==(const Entry& other) const {
    return (this->index == other.index && this->fvalue == other.fvalue);
  }
};

/*!
 * \brief Parameters for constructing batches.
 */
struct BatchParam {
  /*! \brief The GPU device to use. */
  int gpu_id;
  /*! \brief Maximum number of bins per feature for histograms. */
  int max_bin;
  /*! \brief Number of rows in a GPU batch, used for finding quantiles on GPU. */
  int gpu_batch_nrows;
  /*! \brief Page size for external memory mode. */
  size_t gpu_page_size;

  inline bool operator!=(const BatchParam& other) const {
    return gpu_id != other.gpu_id ||
           max_bin != other.max_bin ||
           gpu_batch_nrows != other.gpu_batch_nrows ||
           gpu_page_size != other.gpu_page_size;
  }
};

/*!
 * \brief In-memory storage unit of sparse batch, stored in CSR format.
 */
class SparsePage {
 public:
  // Offset for each row.
  HostDeviceVector<bst_row_t> offset;
  /*! \brief the data of the segments */
  HostDeviceVector<Entry> data;

  size_t base_rowid{};

  /*! \brief an instance of sparse vector in the batch */
  using Inst = common::Span<Entry const>;

  /*! \brief get i-th row from the batch */
  inline Inst operator[](size_t i) const {
    const auto& data_vec = data.HostVector();
    const auto& offset_vec = offset.HostVector();
    size_t size;
    // in distributed mode, some partitions may not get any instance for a feature. Therefore
    // we should set the size as zero
    if (rabit::IsDistributed() && i + 1 >= offset_vec.size()) {
      size = 0;
    } else {
      size = offset_vec[i + 1] - offset_vec[i];
    }
    return {data_vec.data() + offset_vec[i],
            static_cast<Inst::index_type>(size)};
  }

  /*! \brief constructor */
  SparsePage() {
    this->Clear();
  }

  /*! \return Number of instances in the page. */
  inline size_t Size() const {
    // offset always carries one leading 0, so row count is Size() - 1.
    return offset.Size() == 0 ? 0 : offset.Size() - 1;
  }

  /*! \return estimation of memory cost of this page
   * NOTE(review): counts offsets as sizeof(size_t) although they are stored
   * as bst_row_t -- correct only if the two types coincide; confirm. */
  inline size_t MemCostBytes() const {
    return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry);
  }

  /*! \brief clear the page */
  inline void Clear() {
    base_rowid = 0;
    auto& offset_vec = offset.HostVector();
    offset_vec.clear();
    offset_vec.push_back(0);
    data.HostVector().clear();
  }

  /*! \brief Set the base row id for this page. */
  inline void SetBaseRowId(size_t row_id) {
    base_rowid = row_id;
  }

  SparsePage GetTranspose(int num_columns) const;

  // Sort the entries of every segment by feature value, in parallel.
  // The loop variable is named ncol because this is presumably applied to
  // CSC pages (where each stored segment is a column) -- TODO confirm.
  void SortRows() {
    auto ncol = static_cast<bst_omp_uint>(this->Size());
#pragma omp parallel for default(none) shared(ncol) schedule(dynamic, 1)
    for (bst_omp_uint i = 0; i < ncol; ++i) {
      if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) {
        std::sort(
            this->data.HostVector().begin() + this->offset.HostVector()[i],
            this->data.HostVector().begin() + this->offset.HostVector()[i + 1],
            Entry::CmpValue);
      }
    }
  }

  /*!
   * \brief Push row block into the page.
   * \param batch the row batch.
   */
  void Push(const dmlc::RowBlock<uint32_t>& batch);
  /**
   * \brief Pushes external data batch onto this page
   *
   * \tparam  AdapterBatchT
   * \param batch
   * \param missing
   * \param nthread
   *
   * \return  The maximum number of columns encountered in this input batch.
   *          Useful when pushing many adapter batches to work out the total
   *          number of columns.
   */
  template <typename AdapterBatchT>
  uint64_t Push(const AdapterBatchT& batch, float missing, int nthread);
  /*!
   * \brief Push a sparse page
   * \param batch the row page
   */
  void Push(const SparsePage &batch);
  /*!
   * \brief Push a SparsePage stored in CSC format
   * \param batch The row batch to be pushed
   */
  void PushCSC(const SparsePage& batch);
};

class CSCPage: public SparsePage {
 public:
  CSCPage() : SparsePage() {}
  explicit CSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};

class SortedCSCPage : public SparsePage {
 public:
  SortedCSCPage() : SparsePage() {}
  explicit SortedCSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};

class EllpackPageImpl;
/*!
 * \brief A page stored in ELLPACK format.
 *
 * This class uses the PImpl idiom (https://en.cppreference.com/w/cpp/language/pimpl) to avoid
 * including CUDA-specific implementation details in the header.
 */
class EllpackPage {
 public:
  /*!
   * \brief Default constructor.
   *
   * This is used in the external memory case. An empty ELLPACK page is constructed with its content
   * set later by the reader.
   */
  EllpackPage();

  /*!
   * \brief Constructor from an existing DMatrix.
   *
   * This is used in the in-memory case. The ELLPACK page is constructed from an existing DMatrix
   * in CSR format.
   */
  explicit EllpackPage(DMatrix* dmat, const BatchParam& param);

  /*! \brief Destructor. */
  ~EllpackPage();

  /*! \return Number of instances in the page. */
  size_t Size() const;

  /*! \brief Set the base row id for this page. */
  void SetBaseRowId(size_t row_id);

  const EllpackPageImpl* Impl() const { return impl_.get(); }
  EllpackPageImpl* Impl() { return impl_.get(); }

 private:
  std::unique_ptr<EllpackPageImpl> impl_;
};

// Abstract implementation behind BatchIterator; concrete subclasses drive
// iteration over pages of type T.
template<typename T>
class BatchIteratorImpl {
 public:
  virtual ~BatchIteratorImpl() = default;
  virtual T& operator*() = 0;
  virtual const T& operator*() const = 0;
  virtual void operator++() = 0;
  virtual bool AtEnd() const = 0;
};

template<typename T>
class BatchIterator {
 public:
  using iterator_category = std::forward_iterator_tag;
  explicit BatchIterator(BatchIteratorImpl<T>* impl) { impl_.reset(impl); }

  void operator++() {
    CHECK(impl_ != nullptr);
    ++(*impl_);
  }

  T& operator*() {
    CHECK(impl_ != nullptr);
    return *(*impl_);
  }

  const T& operator*() const {
    CHECK(impl_ != nullptr);
    return *(*impl_);
  }

  // NOTE(review): rhs is deliberately ignored -- this only tests whether
  // *this* iterator is exhausted, so the comparison is valid solely against
  // the end() sentinel produced by BatchSet::end(). Do not compare two
  // mid-range iterators with it.
  bool operator!=(const BatchIterator& rhs) const {
    CHECK(impl_ != nullptr);
    return !impl_->AtEnd();
  }

  bool AtEnd() const {
    CHECK(impl_ != nullptr);
    return impl_->AtEnd();
  }

 private:
  std::shared_ptr<BatchIteratorImpl<T>> impl_;
};

template<typename T>
class BatchSet {
 public:
  explicit BatchSet(BatchIterator<T> begin_iter) : begin_iter_(begin_iter) {}
  BatchIterator<T> begin() { return begin_iter_; }
  // end() is a null-impl sentinel; only meaningful via operator!= above.
  BatchIterator<T> end() { return BatchIterator<T>(nullptr); }

 private:
  BatchIterator<T> begin_iter_;
};

/*!
 * \brief This is data structure that user can pass to DMatrix::Create
 *  to create a DMatrix for training, user can create this data structure
 *  for customized Data Loading on single machine.
 *
 *  On distributed setting, usually an customized dmlc::Parser is needed instead.
 */
template<typename T>
class DataSource : public dmlc::DataIter<T> {
 public:
  /*!
   * \brief Meta information about the dataset
   * The subclass need to be able to load this correctly from data.
   */
  MetaInfo info;
};

/*!
 * \brief Internal data structured used by XGBoost during training.
 *  There are two ways to create a customized DMatrix that reads in user defined-format.
 *
 *  - Provide a dmlc::Parser and pass into the DMatrix::Create
 *  - Alternatively, if data can be represented by an URL, define a new dmlc::Parser and register by
 *    DMLC_REGISTER_DATA_PARSER;
 *      - This works best for user defined data input source, such as data-base, filesystem.
 *  - Provide a DataSource, that can be passed to DMatrix::Create
 *    This can be used to re-use inmemory data structure into DMatrix.
 */
class DMatrix {
 public:
  /*! \brief default constructor */
  DMatrix() = default;
  /*! \brief meta information of the dataset */
  virtual MetaInfo& Info() = 0;
  /*! \brief meta information of the dataset */
  virtual const MetaInfo& Info() const = 0;
  /**
   * \brief Gets batches. Use range based for loop over BatchSet to access individual batches.
   */
  template<typename T>
  BatchSet<T> GetBatches(const BatchParam& param = {});
  // the following are column meta data, should be able to answer them fast.
  /*! \return Whether the data columns single column block. */
  virtual bool SingleColBlock() const = 0;
  /*! \brief get column density */
  virtual float GetColDensity(size_t cidx) = 0;
  /*! \brief virtual destructor */
  virtual ~DMatrix() = default;
  /*! \brief Whether the matrix is dense. */
  bool IsDense() const {
    return Info().num_nonzero_ == Info().num_row_ * Info().num_col_;
  }
  /*!
   * \brief Load DMatrix from URI.
   * \param uri The URI of input.
   * \param silent Whether print information during loading.
   * \param load_row_split Flag to read in part of rows, divided among the workers in distributed mode.
   * \param file_format The format type of the file, used for dmlc::Parser::Create.
   *   By default "auto" will be able to load in both local binary file.
   * \param page_size Page size for external memory.
   * \return The created DMatrix.
   */
  static DMatrix* Load(const std::string& uri,
                       bool silent,
                       bool load_row_split,
                       const std::string& file_format = "auto",
                       size_t page_size = kPageSize);

  /**
   * \brief Creates a new DMatrix from an external data adapter.
   *
   * \tparam  AdapterT  Type of the adapter.
   * \param [in,out]  adapter       View onto an external data.
   * \param           missing       Values to count as missing.
   * \param           nthread       Number of threads for construction.
   * \param           cache_prefix  (Optional) The cache prefix for external memory.
   * \param           page_size     (Optional) Size of the page.
   *
   * \return  a Created DMatrix.
   */
  template <typename AdapterT>
  static DMatrix* Create(AdapterT* adapter, float missing, int nthread,
                         const std::string& cache_prefix = "",
                         size_t page_size = kPageSize);

  /*! \brief page size 32 MB */
  static const size_t kPageSize = 32UL << 20UL;

 protected:
  virtual BatchSet<SparsePage> GetRowBatches() = 0;
  virtual BatchSet<CSCPage> GetColumnBatches() = 0;
  virtual BatchSet<SortedCSCPage> GetSortedColumnBatches() = 0;
  virtual BatchSet<EllpackPage> GetEllpackBatches(const BatchParam& param) = 0;
};

// Explicit specializations route the generic GetBatches<T>() call to the
// protected per-page-type virtual getters.
template<>
inline BatchSet<SparsePage> DMatrix::GetBatches(const BatchParam&) {
  return GetRowBatches();
}

template<>
inline BatchSet<CSCPage> DMatrix::GetBatches(const BatchParam&) {
  return GetColumnBatches();
}

template<>
inline BatchSet<SortedCSCPage> DMatrix::GetBatches(const BatchParam&) {
  return GetSortedColumnBatches();
}

template<>
inline BatchSet<EllpackPage> DMatrix::GetBatches(const BatchParam& param) {
  return GetEllpackBatches(param);
}
}  // namespace xgboost

namespace dmlc {
DMLC_DECLARE_TRAITS(is_pod, xgboost::Entry, true);
}
#endif  // XGBOOST_DATA_H_