source
stringlengths
3
92
c
stringlengths
26
2.25M
comm.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /** * Copyright (c) 2015 by Contributors */ #ifndef MXNET_KVSTORE_COMM_H_ #define MXNET_KVSTORE_COMM_H_ #include <dmlc/omp.h> #include <string> #include <algorithm> #include <utility> #include <limits> #include <vector> #include <tuple> #include <thread> #include "mxnet/ndarray.h" #include "gradient_compression.h" #include "../ndarray/ndarray_function.h" #include "../operator/tensor/sparse_retain-inl.h" #include "./kvstore_utils.h" namespace mxnet { namespace kvstore { /** * \brief multiple device commmunication */ class Comm { public: Comm() { pinned_ctx_ = Context::CPUPinned(0); } virtual ~Comm() { } /** * \brief init key with the data shape and storage shape */ virtual void Init(int key, const NDArrayStorageType stype, const TShape& shape, int dtype = mshadow::kFloat32) = 0; /** * \brief returns src[0] + .. 
+ src[src.size()-1] */ virtual const NDArray& Reduce( int key, const std::vector<NDArray>& src, int priority) = 0; /** * \brief copy from src to dst[i] for every i */ virtual void Broadcast( int key, const NDArray& src, const std::vector<NDArray*> dst, int priority) = 0; /** * \brief broadcast src to dst[i] with target row_ids for every i * \param key the identifier key for the stored ndarray * \param src the source row_sparse ndarray to broadcast * \param dst a list of destination row_sparse NDArray and its target row_ids to broadcast, where the row_ids are expected to be unique and sorted in row_id.data() * \param priority the priority of the operation */ virtual void BroadcastRowSparse(int key, const NDArray& src, const std::vector<std::pair<NDArray*, NDArray>>& dst, const int priority) = 0; /** * \brief return a pinned contex */ Context pinned_ctx() const { return pinned_ctx_; } /** * \brief Sets gradient compression parameters to be able to * perform reduce with compressed gradients */ void SetGradientCompression(std::shared_ptr<GradientCompression> gc) { gc_ = gc; } protected: Context pinned_ctx_; std::shared_ptr<GradientCompression> gc_; }; /** * \brief an implemention of Comm that first copy data to CPU memeory, and then * reduce there */ class CommCPU : public Comm { public: CommCPU() { nthread_reduction_ = dmlc::GetEnv("MXNET_KVSTORE_REDUCTION_NTHREADS", 4); bigarray_bound_ = dmlc::GetEnv("MXNET_KVSTORE_BIGARRAY_BOUND", 1000 * 1000); // TODO(junwu) delete the following data member, now for benchmark only is_serial_push_ = dmlc::GetEnv("MXNET_KVSTORE_SERIAL_PUSH", 0); } virtual ~CommCPU() { } void Init(int key, const NDArrayStorageType stype, const TShape& shape, int type = mshadow::kFloat32) override { // Delayed allocation - the dense merged buffer might not be used at all if push() // only sees sparse arrays bool delay_alloc = true; merge_buf_[key].merged = NDArray(shape, pinned_ctx_, delay_alloc, type); } const NDArray& Reduce(int key, const 
std::vector<NDArray>& src, int priority) override { auto& buf = merge_buf_[key]; const auto stype = src[0].storage_type(); // avoid extra copy for single device, but it may bring problems for // abnormal usage of kvstore if (src.size() == 1) { if (stype == kDefaultStorage) { return src[0]; } else { // With 'local' kvstore, we could store the weight on CPU while compute // the gradient on GPU when the weight is extremely large. // To avoiding copying the weight to the same context of the gradient, // we always copy the gradient to merged buf. NDArray& merged = buf.merged_buf(stype); CopyFromTo(src[0], &merged, priority); return merged; } } NDArray& buf_merged = buf.merged_buf(stype); // normal dense reduce if (stype == kDefaultStorage) { std::vector<Engine::VarHandle> const_vars(src.size() - 1); std::vector<NDArray> reduce(src.size()); CopyFromTo(src[0], &buf_merged, priority); reduce[0] = buf_merged; if (buf.copy_buf.empty()) { buf.copy_buf.resize(src.size()-1); for (size_t j = 0; j < src.size() - 1; ++j) { // allocate copy buffer buf.copy_buf[j] = NDArray( src[0].shape(), pinned_ctx_, false, src[0].dtype()); } } CHECK(stype == buf.copy_buf[0].storage_type()) << "Storage type mismatch detected. " << stype << "(src) vs. 
" << buf.copy_buf[0].storage_type() << "(buf.copy_buf)"; for (size_t i = 1; i < src.size(); ++i) { CopyFromTo(src[i], &(buf.copy_buf[i-1]), priority); reduce[i] = buf.copy_buf[i-1]; const_vars[i-1] = reduce[i].var(); } Engine::Get()->PushAsync( [reduce, this](RunContext rctx, Engine::CallbackOnComplete on_complete) { ReduceSumCPU(reduce); on_complete(); }, Context::CPU(), const_vars, {reduce[0].var()}, FnProperty::kCPUPrioritized, priority, "KVStoreReduce"); } else { // sparse reduce std::vector<Engine::VarHandle> const_vars(src.size()); std::vector<NDArray> reduce(src.size()); if (buf.copy_buf.empty()) { buf.copy_buf.resize(src.size()); for (size_t j = 0; j < src.size(); ++j) { buf.copy_buf[j] = NDArray( src[0].storage_type(), src[0].shape(), pinned_ctx_, true, src[0].dtype()); } } CHECK(stype == buf.copy_buf[0].storage_type()) << "Storage type mismatch detected. " << stype << "(src) vs. " << buf.copy_buf[0].storage_type() << "(buf.copy_buf)"; for (size_t i = 0; i < src.size(); ++i) { CopyFromTo(src[i], &(buf.copy_buf[i]), priority); reduce[i] = buf.copy_buf[i]; const_vars[i] = reduce[i].var(); } Resource rsc = ResourceManager::Get()->Request(buf_merged.ctx(), ResourceRequest(ResourceRequest::kTempSpace)); Engine::Get()->PushAsync( [reduce, buf_merged, rsc, this](RunContext rctx, Engine::CallbackOnComplete on_complete) { NDArray out = buf_merged; is_serial_push_? ReduceSumCPUExSerial(reduce, &out) : mxnet::ndarray::ElementwiseSum(rctx.get_stream<cpu>(), rsc, reduce, &out); on_complete(); }, Context::CPU(), const_vars, {buf_merged.var(), rsc.var}, FnProperty::kCPUPrioritized, priority, "KVStoreReduce"); } return buf_merged; } void Broadcast(int key, const NDArray& src, const std::vector<NDArray*> dst, int priority) override { int mask = src.ctx().dev_mask(); if (mask == Context::kCPU) { for (auto d : dst) CopyFromTo(src, d, priority); } else { // First copy data to pinned_ctx, then broadcast. // Note that kv.init initializes the data on pinned_ctx. 
// This branch indicates push() with ndarrays on gpus were called, // and the source is copied to gpu ctx. // Also indicates that buffers are already initialized during push(). auto& buf = merge_buf_[key].merged_buf(src.storage_type()); CopyFromTo(src, &buf, priority); for (auto d : dst) CopyFromTo(buf, d, priority); } } void BroadcastRowSparse(int key, const NDArray& src, const std::vector<std::pair<NDArray*, NDArray>>& dst, const int priority) override { using namespace mshadow; CHECK_EQ(src.storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row-sparse src NDArray"; CHECK_EQ(src.ctx().dev_mask(), Context::kCPU) << "BroadcastRowSparse with src on gpu context not supported"; for (size_t i = 0; i < dst.size(); ++i) { NDArray* out = dst[i].first; NDArray row_id = dst[i].second; CHECK_EQ(out->storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row_sparse dst NDArray"; CHECK_EQ(row_id.ctx().dev_mask(), Context::kCPU) << "BroadcastRowSparse with row_indices on gpu context not supported"; // retain according to unique indices const bool is_same_ctx = out->ctx() == src.ctx(); const bool is_diff_var = out->var() != src.var(); NDArray retained_cpu = (is_same_ctx && is_diff_var) ? *out : NDArray(kRowSparseStorage, src.shape(), src.ctx(), true, src.dtype(), src.aux_types()); if (!is_diff_var) { common::LogOnce("The output of row_sparse_pull() on key " + std::to_string(key) + "refers to the same NDArray as the one stored in KVStore." "Performing row_sparse_pull() with such output is going to change the " "data stored in KVStore. Incorrect result may be generated " "next time row_sparse_pull() is called. 
To avoid such an issue," "consider create a new NDArray buffer to store the output."); } Engine::Get()->PushAsync( [=](RunContext rctx, Engine::CallbackOnComplete on_complete) { const TBlob& indices = row_id.data(); NDArray temp = retained_cpu; // get rid the of const qualifier op::SparseRetainOpForwardRspImpl<cpu>(rctx.get_stream<cpu>(), src, indices, kWriteTo, &temp); on_complete(); }, Context::CPU(), {src.var(), row_id.var()}, {retained_cpu.var()}, FnProperty::kNormal, priority, "KVStoreSparseRetain"); // if retained_cpu == out, CopyFromTo will ignore the copy operation CopyFromTo(retained_cpu, out, priority); } } private: // reduce sum into val[0] inline void ReduceSumCPU(const std::vector<NDArray> &in_data) { MSHADOW_TYPE_SWITCH(in_data[0].dtype(), DType, { std::vector<DType*> dptr(in_data.size()); for (size_t i = 0; i < in_data.size(); ++i) { TBlob data = in_data[i].data(); CHECK(data.CheckContiguous()); dptr[i] = data.FlatTo2D<cpu, DType>().dptr_; } size_t total = in_data[0].shape().Size(); ReduceSumCPUImpl(dptr, total); }); } // serial implementation of reduce sum for row sparse NDArray. 
inline void ReduceSumCPUExSerial(const std::vector<NDArray> &in, NDArray *out) { using namespace rowsparse; using namespace mshadow; auto stype = out->storage_type(); CHECK_EQ(stype, kRowSparseStorage) << "Unexpected storage type " << stype; size_t total_num_rows = 0; size_t num_in = in.size(); // skip the ones with empty indices and values std::vector<bool> skip(num_in, false); // the values tensor of the inputs MSHADOW_TYPE_SWITCH(out->dtype(), DType, { MSHADOW_IDX_TYPE_SWITCH(out->aux_type(kIdx), IType, { std::vector<Tensor<cpu, 2, DType>> in_vals(num_in); std::vector<Tensor<cpu, 1, IType>> in_indices(num_in); // offset to the values tensor of all inputs std::vector<size_t> offsets(num_in, 0); std::vector<size_t> num_rows(num_in, 0); for (size_t i = 0; i < num_in; i++) { if (!in[i].storage_initialized()) { skip[i] = true; continue; } auto size = in[i].aux_shape(kIdx).Size(); num_rows[i] = size; total_num_rows += size; in_vals[i] = in[i].data().FlatTo2D<cpu, DType>(); in_indices[i] = in[i].aux_data(kIdx).FlatTo1D<cpu, IType>(); } std::vector<IType> indices; indices.reserve(total_num_rows); // gather indices from all inputs for (size_t i = 0; i < num_in; i++) { for (size_t j = 0; j < num_rows[i]; j++) { indices.emplace_back(in_indices[i][j]); } } CHECK_EQ(indices.size(), total_num_rows); // dedup indices std::sort(indices.begin(), indices.end()); indices.resize(std::unique(indices.begin(), indices.end()) - indices.begin()); // the one left are unique non-zero rows size_t nnr = indices.size(); // allocate memory for output out->CheckAndAlloc({Shape1(nnr)}); auto idx_data = out->aux_data(kIdx).FlatTo1D<cpu, IType>(); auto val_data = out->data().FlatTo2D<cpu, DType>(); for (size_t i = 0; i < nnr; i++) { // copy indices back idx_data[i] = indices[i]; bool zeros = true; for (size_t j = 0; j < num_in; j++) { if (skip[j]) continue; size_t offset = offsets[j]; if (offset < num_rows[j]) { if (indices[i] == in_indices[j][offset]) { if (zeros) { Copy(val_data[i], 
in_vals[j][offset], nullptr); zeros = false; } else { val_data[i] += in_vals[j][offset]; } offsets[j] += 1; } } } } }); }); } template<typename DType> inline static void ReduceSumCPU( const std::vector<DType*> &dptr, size_t offset, index_t size) { using namespace mshadow; // NOLINT(*) Tensor<cpu, 1, DType> in_0(dptr[0] + offset, Shape1(size)); for (size_t i = 1; i < dptr.size(); i+=4) { switch (dptr.size() - i) { case 1: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); in_0 += in_1; break; } case 2: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size)); in_0 += in_1 + in_2; break; } case 3: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size)); in_0 += in_1 + in_2 + in_3; break; } default: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_4(dptr[i+3] + offset, Shape1(size)); in_0 += in_1 + in_2 + in_3 + in_4; break; } } } } template<typename DType> inline void ReduceSumCPUImpl(std::vector<DType*> dptr, size_t total) { const size_t step = std::min(bigarray_bound_, static_cast<size_t>(4 << 10)); long ntask = (total + step - 1) / step; // NOLINT(*) if (total < bigarray_bound_ || nthread_reduction_ <= 1) { ReduceSumCPU(dptr, 0, total); } else { #pragma omp parallel for schedule(static) num_threads(nthread_reduction_) for (long j = 0; j < ntask; ++j) { // NOLINT(*) size_t k = static_cast<size_t>(j); size_t begin = std::min(k * step, total); size_t end = std::min((k + 1) * step, total); if (j == ntask - 1) CHECK_EQ(end, total); ReduceSumCPU(dptr, begin, static_cast<index_t>(end - begin)); } } } /// \brief temporal space for pushing and pulling struct BufferEntry { /// \brief the merged value NDArray 
merged; /// \brief the cpu buffer for gpu data std::vector<NDArray> copy_buf; /// \brief the merged buffer for the given storage type inline NDArray& merged_buf(NDArrayStorageType stype) { if (stype == kDefaultStorage) { return merged; } CHECK(stype == kRowSparseStorage) << "unexpected storage type " << stype; // check if sparse_merged is initialized if (sparse_merged.is_none()) { CHECK(!merged.is_none()); sparse_merged = NDArray(kRowSparseStorage, merged.shape(), merged.ctx(), true, merged.dtype()); } return sparse_merged; } private: /// \brief the sparse merged value NDArray sparse_merged; }; std::unordered_map<int, BufferEntry> merge_buf_; size_t bigarray_bound_; int nthread_reduction_; bool is_serial_push_; }; /** * \brief an implementation of Comm that performs reduction on device * directly. * * It is faster if the total device-to-device bandwidths is larger than * device-to-cpu, which is often true for 4 or 8 GPUs. But it uses more device * memory. */ class CommDevice : public Comm { public: CommDevice() { inited_ = false; } virtual ~CommDevice() { } void Init(int key, const NDArrayStorageType stype, const TShape& shape, int dtype = mshadow::kFloat32) override { sorted_key_attrs_.emplace_back(key, shape, dtype); inited_ = false; } void InitBuffersAndComm(const std::vector<NDArray>& src) { if (!inited_) { std::vector<Context> devs; for (const auto& a : src) { devs.push_back(a.ctx()); } InitMergeBuffer(devs); if (dmlc::GetEnv("MXNET_ENABLE_GPU_P2P", 1)) { EnableP2P(devs); } } } const NDArray& ReduceRowSparse(int key, const std::vector<NDArray>& src, int priority) { auto& buf = merge_buf_[key]; std::vector<NDArray> reduce(src.size()); const NDArrayStorageType stype = src[0].storage_type(); NDArray& buf_merged = buf.merged_buf(stype); if (buf.copy_buf.empty()) { // initialize buffer for copying during reduce buf.copy_buf.resize(src.size()); for (size_t j = 0; j < src.size(); ++j) { buf.copy_buf[j] = NDArray(stype, src[0].shape(), buf_merged.ctx(), true, 
src[0].dtype()); } } CHECK(src[0].storage_type() == buf.copy_buf[0].storage_type()) << "Storage type mismatch detected. " << src[0].storage_type() << "(src) vs. " << buf.copy_buf[0].storage_type() << "(buf.copy_buf)"; for (size_t i = 0; i < src.size(); ++i) { CopyFromTo(src[i], &(buf.copy_buf[i]), priority); reduce[i] = buf.copy_buf[i]; } ElementwiseSum(reduce, &buf_merged, priority); return buf_merged; } const NDArray& Reduce(int key, const std::vector<NDArray>& src, int priority) override { // when this reduce is called from kvstore_dist, gc is not set // we don't do compression twice in dist_sync_device if ((gc_ != nullptr) && (gc_->get_type() != CompressionType::kNone)) { return ReduceCompressed(key, src, priority); } // avoid extra copy for single device, but it may bring problems for // abnormal usage of kvstore if (src.size() == 1) { return src[0]; } InitBuffersAndComm(src); auto& buf = merge_buf_[key]; const NDArrayStorageType stype = src[0].storage_type(); NDArray& buf_merged = buf.merged_buf(stype); // normal dense reduce if (stype == kDefaultStorage) { CopyFromTo(src[0], &buf_merged, priority); std::vector<NDArray> reduce(src.size()); reduce[0] = buf_merged; if (buf.copy_buf.empty()) { // TODO(mli) this results in large device memory usage for huge ndarray, // such as the largest fullc in VGG. consider to do segment reduce with // NDArray.Slice or gpu direct memory access. 
for the latter, we need to // remove some ctx check, and also it reduces 20% perf buf.copy_buf.resize(src.size()-1); for (size_t i = 0; i < src.size()-1; ++i) { buf.copy_buf[i] = NDArray( buf_merged.shape(), buf_merged.ctx(), false, buf_merged.dtype()); } } for (size_t i = 0; i < src.size()-1; ++i) { CopyFromTo(src[i+1], &(buf.copy_buf[i]), priority); reduce[i+1] = buf.copy_buf[i]; } ElementwiseSum(reduce, &buf_merged, priority); } else { // sparse reduce buf_merged = ReduceRowSparse(key, src, priority); } return buf_merged; } const NDArray& ReduceCompressed(int key, const std::vector<NDArray>& src, int priority) { InitBuffersAndComm(src); auto& buf = merge_buf_[key]; std::vector<NDArray> reduce(src.size()); if (buf.copy_buf.empty()) { // one buf for each context buf.copy_buf.resize(src.size()); buf.compressed_recv_buf.resize(src.size()); buf.compressed_send_buf.resize(src.size()); buf.residual.resize(src.size()); for (size_t i = 0; i < src.size(); ++i) { buf.copy_buf[i] = NDArray(buf.merged.shape(), buf.merged.ctx(), false, buf.merged.dtype()); buf.residual[i] = NDArray(buf.merged.shape(), src[i].ctx(), false, buf.merged.dtype()); buf.residual[i] = 0; int64_t small_size = gc_->GetCompressedSize(buf.merged.shape().Size()); buf.compressed_recv_buf[i] = NDArray(TShape{small_size}, buf.merged.ctx(), false, buf.merged.dtype()); buf.compressed_send_buf[i] = NDArray(TShape{small_size}, src[i].ctx(), false, buf.merged.dtype()); } } for (size_t i = 0; i < src.size(); ++i) { // compress before copy // this is done even if the data is on same context as copy_buf because // we don't want the training to be biased towards data on this GPU gc_->Quantize(src[i], &(buf.compressed_send_buf[i]), &(buf.residual[i]), priority); if (buf.compressed_send_buf[i].ctx() != buf.compressed_recv_buf[i].ctx()) { CopyFromTo(buf.compressed_send_buf[i], &(buf.compressed_recv_buf[i]), priority); } else { // avoid memory copy when they are on same context buf.compressed_recv_buf[i] = 
buf.compressed_send_buf[i]; } gc_->Dequantize(buf.compressed_recv_buf[i], &(buf.copy_buf[i]), priority); reduce[i] = buf.copy_buf[i]; } ElementwiseSum(reduce, &buf.merged); return buf.merged; } void Broadcast(int key, const NDArray& src, const std::vector<NDArray*> dst, int priority) override { if (!inited_) { // copy to a random device first int dev_id = key % dst.size(); CopyFromTo(src, dst[dev_id], priority); for (size_t i = 0; i < dst.size(); ++i) { if (i != static_cast<size_t>(dev_id)) { CopyFromTo(*dst[dev_id], dst[i], priority); } } } else { auto& buf_merged = merge_buf_[key].merged_buf(src.storage_type()); CopyFromTo(src, &buf_merged, priority); for (auto d : dst) { CopyFromTo(buf_merged, d, priority); } } } void BroadcastRowSparse(int key, const NDArray& src, const std::vector<std::pair<NDArray*, NDArray>>& dst, const int priority) override { CHECK_EQ(src.storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row-sparse src NDArray"; for (size_t i = 0; i < dst.size(); ++i) { NDArray* out = dst[i].first; NDArray row_id = dst[i].second; CHECK_EQ(out->storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row_sparse dst NDArray"; CHECK_EQ(row_id.ctx(), src.ctx()) << "row_id and src are expected to be on the same context"; // retain according to indices const bool is_same_ctx = out->ctx() == src.ctx(); const bool is_diff_var = out->var() != src.var(); NDArray retained_gpu = (is_same_ctx && is_diff_var) ? *out : NDArray(kRowSparseStorage, out->shape(), src.ctx(), true, out->dtype(), out->aux_types()); if (!is_diff_var) { common::LogOnce("The output of row_sparse_pull() on key " + std::to_string(key) + "refers to the same NDArray as the one stored in KVStore." "Performing row_sparse_pull() with such output is going to change the " "data stored in KVStore. Incorrect result may be generated " "next time row_sparse_pull() is called. 
To avoid such an issue," "consider create a new NDArray buffer to store the output."); } bool is_gpu = retained_gpu.ctx().dev_mask() == gpu::kDevMask; Engine::Get()->PushAsync([=](RunContext rctx, Engine::CallbackOnComplete on_complete) { const TBlob& indices = row_id.data(); using namespace mxnet::common; NDArray temp = retained_gpu; switch (temp.ctx().dev_mask()) { case cpu::kDevMask: { SparseRetainOpForwardRspWrapper<cpu>(rctx.get_stream<cpu>(), src, indices, kWriteTo, &temp); break; } #if MXNET_USE_CUDA case gpu::kDevMask: { SparseRetainOpForwardRspWrapper<gpu>(rctx.get_stream<gpu>(), src, indices, kWriteTo, &temp); // wait for GPU operations to complete rctx.get_stream<gpu>()->Wait(); break; } #endif default: LOG(FATAL) << MXNET_GPU_NOT_ENABLED_ERROR; } on_complete(); }, retained_gpu.ctx(), {src.var(), row_id.var()}, {retained_gpu.var()}, is_gpu ? FnProperty::kGPUPrioritized : FnProperty::kCPUPrioritized, priority, "KVStoreSparseRetain"); CopyFromTo(retained_gpu, out, priority); } } using KeyAttrs = std::tuple<int, TShape, int>; // try to allocate buff on device evenly void InitMergeBuffer(const std::vector<Context>& devs) { std::sort(sorted_key_attrs_.begin(), sorted_key_attrs_.end(), []( const KeyAttrs& a, const KeyAttrs& b) { return std::get<1>(a).Size() > std::get<1>(b).Size(); }); std::unordered_map<int, std::pair<Context, size_t>> ctx_info; for (auto d : devs) { ctx_info[d.dev_id] = std::make_pair(d, 0); } for (size_t i = 0; i < sorted_key_attrs_.size(); ++i) { const int key = std::get<0>(sorted_key_attrs_[i]); const TShape& shape = std::get<1>(sorted_key_attrs_[i]); const int type = std::get<2>(sorted_key_attrs_[i]); auto& buf = merge_buf_[key]; Context ctx; size_t min_size = std::numeric_limits<size_t>::max(); for (auto it = ctx_info.begin(); it != ctx_info.end(); ++it) { size_t size = it->second.second; if (size <= min_size) { ctx = it->second.first; min_size = size; } } // Delayed allocation - as the dense merged buffer might not be used at all if 
push() // only sees sparse arrays if (buf.merged.is_none()) { bool delay_alloc = true; buf.merged = NDArray(shape, ctx, delay_alloc, type); } ctx_info[ctx.dev_id].second += shape.Size(); } inited_ = true; } private: void EnableP2P(const std::vector<Context>& devs) { #if MXNET_USE_CUDA std::vector<int> gpus; for (const auto& d : devs) { if (d.dev_mask() == gpu::kDevMask) { gpus.push_back(d.dev_id); } } int n = static_cast<int>(gpus.size()); int enabled = 0; std::vector<int> p2p(n*n); // Restores active device to what it was before EnableP2P mxnet::common::cuda::DeviceStore device_store; for (int i = 0; i < n; ++i) { device_store.SetDevice(gpus[i]); for (int j = 0; j < n; j++) { int access; cudaDeviceCanAccessPeer(&access, gpus[i], gpus[j]); if (access) { cudaError_t e = cudaDeviceEnablePeerAccess(gpus[j], 0); if (e == cudaSuccess || e == cudaErrorPeerAccessAlreadyEnabled) { ++enabled; p2p[i*n+j] = 1; } } } } if (enabled != n*(n-1)) { // print warning info if not fully enabled LOG(WARNING) << "only " << enabled << " out of " << n*(n-1) << " GPU pairs are enabled direct access. " << "It may affect the performance. " << "You can set MXNET_ENABLE_GPU_P2P=0 to turn it off"; std::string access(n, '.'); for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { access[j] = p2p[i*n+j] ? 
'v' : '.'; } LOG(WARNING) << access; } } #endif } /// \brief temporal space for pushing and pulling struct BufferEntry { /// \brief the dense merged value for reduce and broadcast operations NDArray merged; /// \brief the gpu buffer for copy during reduce operation std::vector<NDArray> copy_buf; /// \brief the residual buffer for gradient compression std::vector<NDArray> residual; /// \brief the small buffer for compressed data in sender std::vector<NDArray> compressed_send_buf; /// \brief the small buffer for compressed data in receiver std::vector<NDArray> compressed_recv_buf; /// \brief the merged buffer for the given storage type (could be either dense or row_sparse) inline NDArray& merged_buf(NDArrayStorageType stype) { if (stype == kDefaultStorage) { CHECK(!merged.is_none()) << "unintialized merge buffer detected"; return merged; } CHECK(stype == kRowSparseStorage) << "unexpected storage type " << stype; // check if sparse_merged is initialized if (sparse_merged.is_none()) { CHECK(!merged.is_none()); sparse_merged = NDArray(kRowSparseStorage, merged.shape(), merged.ctx(), true, merged.dtype()); } return sparse_merged; } private: /// \brief the sparse merged value for reduce and rowsparse broadcast operations NDArray sparse_merged; }; std::unordered_map<int, BufferEntry> merge_buf_; public: bool inited_; std::vector<KeyAttrs> sorted_key_attrs_; }; } // namespace kvstore } // namespace mxnet #endif // MXNET_KVSTORE_COMM_H_
ten_tusscher_2004_epi_S3_20.c
// Original Ten Tusscher 2004 epicardial ventricular cell model (variant S3_20:
// Elnaz's fitted steady-state ICs and per-current conductance scaling).
// Solved with explicit Euler for concentrations/voltage and Rush-Larsen
// (exponential) updates for the gating variables.
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S3_20.h"

// Report the model's initial transmembrane voltage and the number of state
// variables (NEQ) to the framework, depending on which fields were requested.
GET_CELL_MODEL_DATA(init_cell_model_data) {
    assert(cell_model);
    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}

//TODO: this should be called only once for the whole mesh, like in the GPU code
// Load the 17 state variables of one cell with precomputed steady-state values.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
    // Default initial conditions
    /*
    sv[0] = INITIAL_V;     // V;       millivolt
    sv[1] = 0.f;           //M
    sv[2] = 0.75;          //H
    sv[3] = 0.75f;         //J
    sv[4] = 0.f;           //Xr1
    sv[5] = 1.f;           //Xr2
    sv[6] = 0.f;           //Xs
    sv[7] = 1.f;           //S
    sv[8] = 0.f;           //R
    sv[9] = 0.f;           //D
    sv[10] = 1.f;          //F
    sv[11] = 1.f;          //FCa
    sv[12] = 1.f;          //G
    sv[13] = 0.0002;       //Cai
    sv[14] = 0.2f;         //CaSR
    sv[15] = 11.6f;        //Nai
    sv[16] = 138.3f;       //Ki
    */
    // Elnaz's steady-state initial conditions
    // (same ordering as the commented defaults above: V, m, h, j, xr1, xr2,
    //  xs, s, r, d, f, fCa, g, Cai, CaSR, Nai, Ki)
    real sv_sst[]={-86.5416381029710,0.00129742313431501,0.779058087874356,0.778951275699783,0.000175400267410166,0.484813241067308,0.00294587325391635,0.999998339341719,1.94207059338896e-08,1.89778840917076e-05,0.999772653033000,1.00721993170388,0.999996907554520,4.22421886024410e-05,0.744054308738152,10.2766651112694,139.172056496758};
    for (uint32_t i = 0; i < NEQ; i++)
        sv[i] = sv_sst[i];
}

// Advance all requested cells by num_steps sub-steps of size dt.
// cells_to_solve (when non-NULL) maps loop index -> cell index; each cell's
// state occupies a contiguous NEQ-sized slice of sv.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
    uint32_t sv_id;
    int i;

    // Cells are independent, so the outer loop is parallelized; sv_id is
    // private because each thread computes its own cell index.
    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = i;

        for (int j = 0; j < num_steps; ++j) {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

// Single explicit integration step for one cell: RHS_cpu returns the
// already-updated state in rDY (not time derivatives), so it is copied
// straight back into sv.
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
    assert(sv);
    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// Compute one time step of the TT2004 epicardial model. Despite the name,
// rDY_ receives the NEW state values (forward-Euler for V and concentrations,
// Rush-Larsen exponential integration for gates), not derivatives.
// NOTE(review): units presumably follow the original TT2004 paper (mV, ms,
// mM) — confirm against the companion header.
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {

    // State variables
    real svolt = sv[0];   // membrane potential
    real sm    = sv[1];   // INa activation gate
    real sh    = sv[2];   // INa fast inactivation gate
    real sj    = sv[3];   // INa slow inactivation gate
    real sxr1  = sv[4];   // IKr activation gate
    real sxr2  = sv[5];   // IKr inactivation gate
    real sxs   = sv[6];   // IKs gate
    real ss    = sv[7];   // Ito inactivation gate
    real sr    = sv[8];   // Ito activation gate
    real sd    = sv[9];   // ICaL activation gate
    real sf    = sv[10];  // ICaL voltage inactivation gate
    real sfca  = sv[11];  // ICaL calcium-dependent inactivation gate
    real sg    = sv[12];  // calcium-release gate
    real Cai   = sv[13];  // intracellular Ca2+
    real CaSR  = sv[14];  // sarcoplasmic-reticulum Ca2+
    real Nai   = sv[15];  // intracellular Na+
    real Ki    = sv[16];  // intracellular K+

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF   =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
///#ifdef EPI
    real Gks=0.245;
///#endif
///#ifdef ENDO
///    real Gks=0.245;
///#endif
///#ifdef MCELL
///    real Gks=0.062;
///#endif
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
//#ifdef EPI
    real Gto=0.294;
//#endif
//#ifdef ENDO
//    real Gto=0.073;
//#endif
//#ifdef MCELL
//    real Gto=0.294;
///#endif
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Variant-specific parameter set: the defaults above are overwritten with
    // the fitted conductances/rates that define the "S3_20" model variant.
    real parameters []={14.3267748982652,0.000369396880965334,0.000138718412791722,0.000274765995516752,0.222557047894483,0.131225943240472,0.194018855199521,4.70098964246625,0.0175173968211143,1.45392118187522,1093.48753540057,0.000621762218099826,0.341961934777053,0.0120870127836469,0.00451100911423527,3.31418392030779e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];   // SR release rate scale
    real crel=parameters[14];   // SR release baseline
    real Vleak=parameters[15];  // SR leak rate

    // Currents and intermediate quantities (names follow the TT2004 paper)
    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
//    real BufferFactorc;
//    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
//    real BufcKbufc=Bufc*Kbufc;
//    real Kbufcsquare=Kbufc*Kbufc;
//    real Kbufc2=2*Kbufc;
//    real BufsrKbufsr=Bufsr*Kbufsr;
//    const real Kbufsrsquare=Kbufsr*Kbufsr;
//    const real Kbufsr2=2*Kbufsr;
    // precomputed exponential decay factors for the fCa and g gates
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents
    // Nernst reversal potentials
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    // inward-rectifier and pump rectification factors
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    ///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    ///Ileak=0.00008f*(CaSR-Cai);
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    // SR calcium with calsequestrin buffering: analytic solution of the
    // quadratic buffering equilibrium (quadratic formula below)
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    // cytosolic calcium with the same buffering treatment
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    // sodium and potassium: plain forward Euler
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    // h and j gates use different rate formulas above/below -40 mV
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // Ito gate kinetics: cell-type specific (this file is built with EPI)
#ifdef EPI
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+28)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates
    // Rush-Larsen: gate -> INF - (INF - gate) * exp(-dt/TAU)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    // fCa and g only relax toward their targets when not clamped: above
    // -37 mV an increase is suppressed (gate may only close)
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
KDTree.h
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #ifndef _SPTAG_COMMON_KDTREE_H_ #define _SPTAG_COMMON_KDTREE_H_ #include <vector> #include <string> #include <shared_mutex> #include "../VectorIndex.h" #include "CommonUtils.h" #include "QueryResultSet.h" #include "WorkSpace.h" namespace SPTAG { namespace COMMON { // node type for storing KDT struct KDTNode { SizeType left; SizeType right; DimensionType split_dim; float split_value; }; class KDTree { public: KDTree() : m_iTreeNumber(2), m_numTopDimensionKDTSplit(5), m_iSamples(1000), m_lock(new std::shared_timed_mutex) {} KDTree(const KDTree& other) : m_iTreeNumber(other.m_iTreeNumber), m_numTopDimensionKDTSplit(other.m_numTopDimensionKDTSplit), m_iSamples(other.m_iSamples), m_lock(new std::shared_timed_mutex) {} ~KDTree() {} inline const KDTNode& operator[](SizeType index) const { return m_pTreeRoots[index]; } inline KDTNode& operator[](SizeType index) { return m_pTreeRoots[index]; } inline SizeType size() const { return (SizeType)m_pTreeRoots.size(); } inline SizeType sizePerTree() const { std::shared_lock<std::shared_timed_mutex> lock(*m_lock); return (SizeType)m_pTreeRoots.size() - m_pTreeStart.back(); } template <typename T> void Rebuild(const Dataset<T>& data, IAbortOperation* abort) { COMMON::KDTree newTrees(*this); newTrees.BuildTrees<T>(data, 1, nullptr, abort); std::unique_lock<std::shared_timed_mutex> lock(*m_lock); m_pTreeRoots.swap(newTrees.m_pTreeRoots); m_pTreeStart.swap(newTrees.m_pTreeStart); } template <typename T> void BuildTrees(const Dataset<T>& data, int numOfThreads, std::vector<SizeType>* indices = nullptr, IAbortOperation* abort = nullptr) { std::vector<SizeType> localindices; if (indices == nullptr) { localindices.resize(data.R()); for (SizeType i = 0; i < localindices.size(); i++) localindices[i] = i; } else { localindices.assign(indices->begin(), indices->end()); } m_pTreeRoots.resize(m_iTreeNumber * localindices.size()); 
m_pTreeStart.resize(m_iTreeNumber, 0); #pragma omp parallel for num_threads(numOfThreads) for (int i = 0; i < m_iTreeNumber; i++) { if (abort && abort->ShouldAbort()) continue; Sleep(i * 100); std::srand(clock()); std::vector<SizeType> pindices(localindices.begin(), localindices.end()); std::random_shuffle(pindices.begin(), pindices.end()); m_pTreeStart[i] = i * (SizeType)pindices.size(); LOG(Helper::LogLevel::LL_Info, "Start to build KDTree %d\n", i + 1); SizeType iTreeSize = m_pTreeStart[i]; DivideTree<T>(data, pindices, 0, (SizeType)pindices.size() - 1, m_pTreeStart[i], iTreeSize, abort); LOG(Helper::LogLevel::LL_Info, "%d KDTree built, %d %zu\n", i + 1, iTreeSize - m_pTreeStart[i], pindices.size()); } } inline std::uint64_t BufferSize() const { return sizeof(int) + sizeof(SizeType) * m_iTreeNumber + sizeof(SizeType) + sizeof(KDTNode) * m_pTreeRoots.size(); } ErrorCode SaveTrees(std::shared_ptr<Helper::DiskPriorityIO> p_out) const { std::shared_lock<std::shared_timed_mutex> lock(*m_lock); IOBINARY(p_out, WriteBinary, sizeof(m_iTreeNumber), (char*)&m_iTreeNumber); IOBINARY(p_out, WriteBinary, sizeof(SizeType) * m_iTreeNumber, (char*)m_pTreeStart.data()); SizeType treeNodeSize = (SizeType)m_pTreeRoots.size(); IOBINARY(p_out, WriteBinary, sizeof(treeNodeSize), (char*)&treeNodeSize); IOBINARY(p_out, WriteBinary, sizeof(KDTNode) * treeNodeSize, (char*)m_pTreeRoots.data()); LOG(Helper::LogLevel::LL_Info, "Save KDT (%d,%d) Finish!\n", m_iTreeNumber, treeNodeSize); return ErrorCode::Success; } ErrorCode SaveTrees(std::string sTreeFileName) const { LOG(Helper::LogLevel::LL_Info, "Save KDT to %s\n", sTreeFileName.c_str()); auto ptr = f_createIO(); if (ptr == nullptr || !ptr->Initialize(sTreeFileName.c_str(), std::ios::binary | std::ios::out)) return ErrorCode::FailedCreateFile; return SaveTrees(ptr); } ErrorCode LoadTrees(char* pKDTMemFile) { m_iTreeNumber = *((int*)pKDTMemFile); pKDTMemFile += sizeof(int); m_pTreeStart.resize(m_iTreeNumber); memcpy(m_pTreeStart.data(), 
pKDTMemFile, sizeof(SizeType) * m_iTreeNumber); pKDTMemFile += sizeof(SizeType)*m_iTreeNumber; SizeType treeNodeSize = *((SizeType*)pKDTMemFile); pKDTMemFile += sizeof(SizeType); m_pTreeRoots.resize(treeNodeSize); memcpy(m_pTreeRoots.data(), pKDTMemFile, sizeof(KDTNode) * treeNodeSize); LOG(Helper::LogLevel::LL_Info, "Load KDT (%d,%d) Finish!\n", m_iTreeNumber, treeNodeSize); return ErrorCode::Success; } ErrorCode LoadTrees(std::shared_ptr<Helper::DiskPriorityIO> p_input) { IOBINARY(p_input, ReadBinary, sizeof(m_iTreeNumber), (char*)&m_iTreeNumber); m_pTreeStart.resize(m_iTreeNumber); IOBINARY(p_input, ReadBinary, sizeof(SizeType) * m_iTreeNumber, (char*)m_pTreeStart.data()); SizeType treeNodeSize; IOBINARY(p_input, ReadBinary, sizeof(treeNodeSize), (char*)&treeNodeSize); m_pTreeRoots.resize(treeNodeSize); IOBINARY(p_input, ReadBinary, sizeof(KDTNode) * treeNodeSize, (char*)m_pTreeRoots.data()); LOG(Helper::LogLevel::LL_Info, "Load KDT (%d,%d) Finish!\n", m_iTreeNumber, treeNodeSize); return ErrorCode::Success; } ErrorCode LoadTrees(std::string sTreeFileName) { LOG(Helper::LogLevel::LL_Info, "Load KDT From %s\n", sTreeFileName.c_str()); auto ptr = f_createIO(); if (ptr == nullptr || !ptr->Initialize(sTreeFileName.c_str(), std::ios::binary | std::ios::in)) return ErrorCode::FailedOpenFile; return LoadTrees(ptr); } template <typename T> void InitSearchTrees(const Dataset<T>& p_data, float(*fComputeDistance)(const T* pX, const T* pY, DimensionType length), const COMMON::QueryResultSet<T> &p_query, COMMON::WorkSpace &p_space) const { for (int i = 0; i < m_iTreeNumber; i++) { KDTSearch(p_data, fComputeDistance, p_query, p_space, m_pTreeStart[i], 0); } } template <typename T> void SearchTrees(const Dataset<T>& p_data, float(*fComputeDistance)(const T* pX, const T* pY, DimensionType length), const COMMON::QueryResultSet<T> &p_query, COMMON::WorkSpace &p_space, const int p_limits) const { while (!p_space.m_SPTQueue.empty() && p_space.m_iNumberOfCheckedLeaves < p_limits) { 
auto& tcell = p_space.m_SPTQueue.pop(); KDTSearch(p_data, fComputeDistance, p_query, p_space, tcell.node, tcell.distance); } } private: template <typename T> void KDTSearch(const Dataset<T>& p_data, float(*fComputeDistance)(const T* pX, const T* pY, DimensionType length), const COMMON::QueryResultSet<T> &p_query, COMMON::WorkSpace& p_space, const SizeType node, const float distBound) const { if (node < 0) { SizeType index = -node - 1; if (index >= p_data.R()) return; #ifdef PREFETCH const T* data = p_data[index]; _mm_prefetch((const char*)data, _MM_HINT_T0); _mm_prefetch((const char*)(data + 64), _MM_HINT_T0); #endif if (p_space.CheckAndSet(index)) return; ++p_space.m_iNumberOfTreeCheckedLeaves; ++p_space.m_iNumberOfCheckedLeaves; p_space.m_NGQueue.insert(NodeDistPair(index, fComputeDistance(p_query.GetTarget(), data, p_data.C()))); return; } auto& tnode = m_pTreeRoots[node]; float diff = (p_query.GetTarget())[tnode.split_dim] - tnode.split_value; float distanceBound = distBound + diff * diff; SizeType otherChild, bestChild; if (diff < 0) { bestChild = tnode.left; otherChild = tnode.right; } else { otherChild = tnode.left; bestChild = tnode.right; } p_space.m_SPTQueue.insert(NodeDistPair(otherChild, distanceBound)); KDTSearch(p_data, fComputeDistance, p_query, p_space, bestChild, distBound); } template <typename T> void DivideTree(const Dataset<T>& data, std::vector<SizeType>& indices, SizeType first, SizeType last, SizeType index, SizeType &iTreeSize, IAbortOperation* abort = nullptr) { if (abort && abort->ShouldAbort()) return; ChooseDivision<T>(data, m_pTreeRoots[index], indices, first, last); SizeType i = Subdivide<T>(data, m_pTreeRoots[index], indices, first, last); if (i - 1 <= first) { m_pTreeRoots[index].left = -indices[first] - 1; } else { iTreeSize++; m_pTreeRoots[index].left = iTreeSize; DivideTree<T>(data, indices, first, i - 1, iTreeSize, iTreeSize); } if (last == i) { m_pTreeRoots[index].right = -indices[last] - 1; } else { iTreeSize++; 
m_pTreeRoots[index].right = iTreeSize; DivideTree<T>(data, indices, i, last, iTreeSize, iTreeSize); } } template <typename T> void ChooseDivision(const Dataset<T>& data, KDTNode& node, const std::vector<SizeType>& indices, const SizeType first, const SizeType last) { std::vector<float> meanValues(data.C(), 0); std::vector<float> varianceValues(data.C(), 0); SizeType end = min(first + m_iSamples, last); SizeType count = end - first + 1; // calculate the mean of each dimension for (SizeType j = first; j <= end; j++) { const T* v = (const T*)data[indices[j]]; for (DimensionType k = 0; k < data.C(); k++) { meanValues[k] += v[k]; } } for (DimensionType k = 0; k < data.C(); k++) { meanValues[k] /= count; } // calculate the variance of each dimension for (SizeType j = first; j <= end; j++) { const T* v = (const T*)data[indices[j]]; for (DimensionType k = 0; k < data.C(); k++) { float dist = v[k] - meanValues[k]; varianceValues[k] += dist*dist; } } // choose the split dimension as one of the dimension inside TOP_DIM maximum variance node.split_dim = SelectDivisionDimension(varianceValues); // determine the threshold node.split_value = meanValues[node.split_dim]; } DimensionType SelectDivisionDimension(const std::vector<float>& varianceValues) const { // Record the top maximum variances std::vector<DimensionType> topind(m_numTopDimensionKDTSplit); int num = 0; // order the variances for (DimensionType i = 0; i < (DimensionType)varianceValues.size(); i++) { if (num < m_numTopDimensionKDTSplit || varianceValues[i] > varianceValues[topind[num - 1]]) { if (num < m_numTopDimensionKDTSplit) { topind[num++] = i; } else { topind[num - 1] = i; } int j = num - 1; // order the TOP_DIM variances while (j > 0 && varianceValues[topind[j]] > varianceValues[topind[j - 1]]) { std::swap(topind[j], topind[j - 1]); j--; } } } // randomly choose a dimension from TOP_DIM return topind[COMMON::Utils::rand(num)]; } template <typename T> SizeType Subdivide(const Dataset<T>& data, const KDTNode& 
node, std::vector<SizeType>& indices, const SizeType first, const SizeType last) const { SizeType i = first; SizeType j = last; // decide which child one point belongs while (i <= j) { SizeType ind = indices[i]; const T* v = (const T*)data[ind]; float val = v[node.split_dim]; if (val < node.split_value) { i++; } else { std::swap(indices[i], indices[j]); j--; } } // if all the points in the node are equal,equally split the node into 2 if ((i == first) || (i == last + 1)) { i = (first + last + 1) / 2; } return i; } private: std::vector<SizeType> m_pTreeStart; std::vector<KDTNode> m_pTreeRoots; public: std::unique_ptr<std::shared_timed_mutex> m_lock; int m_iTreeNumber, m_numTopDimensionKDTSplit, m_iSamples; }; } } #endif
GB_unop__identity_fp64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_fp64_fc64)
// op(A') function:  GB (_unop_tran__identity_fp64_fc64)

// C type:   double
// A type:   GxB_FC64_t
// cast:     double cij = (double) creal (aij)
// unaryop:  cij = aij

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting: note the FC64 -> FP64 cast keeps only the real part of aij
#define GB_CAST(z, aij) \
    double z = (double) creal (aij) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = (double) creal (aij) ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// (false here: the complex-to-real cast above is a typecast)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply the identity op (with FC64->FP64 cast) to all anz entries of Ax,
// writing into Cx.  Ab selects present entries when A is bitmap; Ab == NULL
// means every entry 0..anz-1 is processed.

GrB_Info GB (_unop_apply__identity_fp64_fc64)
(
    double *Cx,                 // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // pure identity with identical types: a flat parallel memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            double z = (double) creal (aij) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   // skip entries not present in the bitmap
            GxB_FC64_t aij = Ax [p] ;
            double z = (double) creal (aij) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The function body is supplied by the shared template GB_unop_transpose.c,
// which is specialized by the GB_* macros defined above.

GrB_Info GB (_unop_tran__identity_fp64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
pp_collision.c
/* Copyright (C) 2017 Atsushi Togo */ /* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. 
*/

#include <stdio.h>
#include <stdlib.h>
#include "imag_self_energy_with_g.h"
#include "interaction.h"
#include "phonoc_array.h"
#include "phonoc_utils.h"
#include "pp_collision.h"
#include "triplet.h"
#include "triplet_iw.h"
#include "lapack_wrapper.h"

/* Per-triplet worker: computes the ph-ph interaction strength for one q-point
 * triplet and accumulates its imaginary-self-energy contribution into ise
 * (num_temps x num_band0 block). Defined below. */
static void get_collision(double *ise,
                          const long num_band0,
                          const long num_band,
                          const long num_temps,
                          const double *temperatures,
                          const double *g,
                          const char *g_zero,
                          const double *frequencies,
                          const lapack_complex_double *eigenvectors,
                          const long triplet[3],
                          const long triplet_weight,
                          const ConstBZGrid *bzgrid,
                          const double *fc3,
                          const long is_compact_fc3,
                          const double *shortest_vectors,
                          const long svecs_dims[3],
                          const long *multiplicity,
                          const double *masses,
                          const long *p2s_map,
                          const long *s2p_map,
                          const long *band_indices,
                          const long symmetrize_fc3_q,
                          const double cutoff_frequency,
                          const long openmp_per_triplets);
/* Sum the per-triplet results into the caller's output array; defined below. */
static void finalize_ise(double *imag_self_energy,
                         const double *ise,
                         const long (*bz_grid_address)[3],
                         const long (*triplets)[3],
                         const long num_triplets,
                         const long num_temps,
                         const long num_band0,
                         const long is_NU);

/* Compute the imaginary self-energy (ph-ph collision) summed over all q-point
 * triplets, using integration weights from tpi_get_integration_weight (the
 * "thm" comment on relative_grid_address suggests the tetrahedron method —
 * confirm against the caller).
 *
 * imag_self_energy: output, (num_temps x num_band0), or twice that when
 *                   is_NU != 0 (split into two blocks by finalize_ise). */
void ppc_get_pp_collision(double *imag_self_energy,
                          const long relative_grid_address[24][4][3], /* thm */
                          const double *frequencies,
                          const lapack_complex_double *eigenvectors,
                          const long (*triplets)[3],
                          const long num_triplets,
                          const long *triplet_weights,
                          const ConstBZGrid *bzgrid,
                          const double *fc3,
                          const long is_compact_fc3,
                          const double *shortest_vectors,
                          const long svecs_dims[3],
                          const long *multiplicity,
                          const double *masses,
                          const long *p2s_map,
                          const long *s2p_map,
                          const Larray *band_indices,
                          const Darray *temperatures,
                          const long is_NU,
                          const long symmetrize_fc3_q,
                          const double cutoff_frequency)
{
  long i;
  long num_band, num_band0, num_band_prod, num_temps;
  long openmp_per_triplets;
  double *ise, *freqs_at_gp, *g;
  char *g_zero;
  long tp_relative_grid_address[2][24][4][3];

  ise = NULL;
  freqs_at_gp = NULL;
  g = NULL;
  g_zero = NULL;

  num_band0 = band_indices->dims[0];
  num_band = svecs_dims[1] * 3;   /* 3 phonon branches per atom */
  num_band_prod = num_band0 * num_band * num_band;
  num_temps = temperatures->dims[0];
  /* NOTE(review): malloc results are not checked anywhere in this file. */
  ise = (double*)malloc(sizeof(double) * num_triplets * num_temps * num_band0);
  freqs_at_gp = (double*)malloc(sizeof(double) * num_band0);
  /* Frequencies of the selected bands at the first grid point of the
   * triplets (triplets[0][0]). */
  for (i = 0; i < num_band0; i++) {
    freqs_at_gp[i] = frequencies[triplets[0][0] * num_band +
                                 band_indices->data[i]];
  }
  /* Parallelize over triplets when there are more triplets than bands;
   * otherwise the inner routines use their own threading (1 - openmp_per_triplets). */
  if (num_triplets > num_band) {
    openmp_per_triplets = 1;
  } else {
    openmp_per_triplets = 0;
  }

  tpl_set_relative_grid_address(tp_relative_grid_address,
                                relative_grid_address,
                                2);

#pragma omp parallel for schedule(guided) private(g, g_zero) if (openmp_per_triplets)
  for (i = 0; i < num_triplets; i++) {
    /* g holds two weight blocks (factor 2), g_zero flags zero weights. */
    g = (double*)malloc(sizeof(double) * 2 * num_band_prod);
    g_zero = (char*)malloc(sizeof(char) * num_band_prod);
    tpi_get_integration_weight(g,
                               g_zero,
                               freqs_at_gp, /* used as f0 */
                               num_band0,
                               tp_relative_grid_address,
                               triplets[i],
                               1,
                               bzgrid,
                               frequencies, /* used as f1 */
                               num_band,
                               frequencies, /* used as f2 */
                               num_band,
                               2,
                               1 - openmp_per_triplets);

    get_collision(ise + i * num_temps * num_band0,
                  num_band0,
                  num_band,
                  num_temps,
                  temperatures->data,
                  g,
                  g_zero,
                  frequencies,
                  eigenvectors,
                  triplets[i],
                  triplet_weights[i],
                  bzgrid,
                  fc3,
                  is_compact_fc3,
                  shortest_vectors,
                  svecs_dims,
                  multiplicity,
                  masses,
                  p2s_map,
                  s2p_map,
                  band_indices->data,
                  symmetrize_fc3_q,
                  cutoff_frequency,
                  openmp_per_triplets);

    free(g_zero);
    g_zero = NULL;
    free(g);
    g = NULL;
  }

  finalize_ise(imag_self_energy,
               ise,
               bzgrid->addresses,
               triplets,
               num_triplets,
               num_temps,
               num_band0,
               is_NU);

  free(freqs_at_gp);
  freqs_at_gp = NULL;
  free(ise);
  ise = NULL;
}

/* Same as ppc_get_pp_collision but with Gaussian smearing (sigma) weights
 * instead of grid-based integration weights; weights beyond
 * sigma * sigma_cutoff are flagged zero. */
void ppc_get_pp_collision_with_sigma(
  double *imag_self_energy,
  const double sigma,
  const double sigma_cutoff,
  const double *frequencies,
  const lapack_complex_double *eigenvectors,
  const long (*triplets)[3],
  const long num_triplets,
  const long *triplet_weights,
  const ConstBZGrid *bzgrid,
  const double *fc3,
  const long is_compact_fc3,
  const double *shortest_vectors,
  const long svecs_dims[3],
  const long *multiplicity,
  const double *masses,
  const long *p2s_map,
  const long *s2p_map,
  const Larray *band_indices,
  const Darray *temperatures,
  const long is_NU,
  const long symmetrize_fc3_q,
  const double cutoff_frequency)
{
  long i;
  long num_band, num_band0, num_band_prod, num_temps;
  long openmp_per_triplets, const_adrs_shift;
  double cutoff;
  double *ise, *freqs_at_gp, *g;
  char *g_zero;

  ise = NULL;
  freqs_at_gp = NULL;
  g = NULL;
  g_zero = NULL;

  num_band0 = band_indices->dims[0];
  num_band = svecs_dims[1] * 3;
  num_band_prod = num_band0 * num_band * num_band;
  num_temps = temperatures->dims[0];
  const_adrs_shift = num_band_prod;   /* offset between the two g blocks */
  ise = (double*)malloc(sizeof(double) * num_triplets * num_temps * num_band0);
  freqs_at_gp = (double*)malloc(sizeof(double) * num_band0);
  for (i = 0; i < num_band0; i++) {
    freqs_at_gp[i] = frequencies[triplets[0][0] * num_band +
                                 band_indices->data[i]];
  }

  if (num_triplets > num_band) {
    openmp_per_triplets = 1;
  } else {
    openmp_per_triplets = 0;
  }

  cutoff = sigma * sigma_cutoff;

#pragma omp parallel for schedule(guided) private(g, g_zero) if (openmp_per_triplets)
  for (i = 0; i < num_triplets; i++) {
    g = (double*)malloc(sizeof(double) * 2 * num_band_prod);
    g_zero = (char*)malloc(sizeof(char) * num_band_prod);
    tpi_get_integration_weight_with_sigma(g,
                                          g_zero,
                                          sigma,
                                          cutoff,
                                          freqs_at_gp,
                                          num_band0,
                                          triplets[i],
                                          const_adrs_shift,
                                          frequencies,
                                          num_band,
                                          2,
                                          0);

    get_collision(ise + i * num_temps * num_band0,
                  num_band0,
                  num_band,
                  num_temps,
                  temperatures->data,
                  g,
                  g_zero,
                  frequencies,
                  eigenvectors,
                  triplets[i],
                  triplet_weights[i],
                  bzgrid,
                  fc3,
                  is_compact_fc3,
                  shortest_vectors,
                  svecs_dims,
                  multiplicity,
                  masses,
                  p2s_map,
                  s2p_map,
                  band_indices->data,
                  symmetrize_fc3_q,
                  cutoff_frequency,
                  openmp_per_triplets);

    free(g_zero);
    g_zero = NULL;
    free(g);
    g = NULL;
  }

  finalize_ise(imag_self_energy,
               ise,
               bzgrid->addresses,
               triplets,
               num_triplets,
               num_temps,
               num_band0,
               is_NU);

  free(freqs_at_gp);
  freqs_at_gp = NULL;
  free(ise);
  ise = NULL;
}

/* Compute |fc3 interaction|^2 for one triplet at the nonzero-weight band
 * combinations (g_pos) and fold it, with the integration weights g, into the
 * per-triplet imaginary-self-energy block ise. */
static void get_collision(double *ise,
                          const long num_band0,
                          const long num_band,
                          const long num_temps,
                          const double *temperatures,
                          const double *g,
                          const char *g_zero,
                          const double *frequencies,
                          const lapack_complex_double *eigenvectors,
                          const long triplet[3],
                          const long triplet_weight,
                          const ConstBZGrid *bzgrid,
                          const double *fc3,
                          const long is_compact_fc3,
                          const double *shortest_vectors,
                          const long svecs_dims[3],
                          const long *multiplicity,
                          const double *masses,
                          const long *p2s_map,
                          const long *s2p_map,
                          const long *band_indices,
                          const long symmetrize_fc3_q,
                          const double cutoff_frequency,
                          const long openmp_per_triplets)
{
  long i;
  long num_band_prod, num_g_pos;
  double *fc3_normal_squared;
  long (*g_pos)[4];

  fc3_normal_squared = NULL;
  g_pos = NULL;
  num_band_prod = num_band0 * num_band * num_band;
  fc3_normal_squared = (double*)malloc(sizeof(double) * num_band_prod);
  g_pos = (long(*)[4])malloc(sizeof(long[4]) * num_band_prod);

  for (i = 0; i < num_band_prod; i++) {
    fc3_normal_squared[i] = 0;
  }

  /* Collect only the band combinations whose weight is nonzero (g_zero),
   * so the expensive interaction is evaluated only where needed. */
  num_g_pos = ise_set_g_pos(g_pos,
                            num_band0,
                            num_band,
                            g_zero);

  itr_get_interaction_at_triplet(
    fc3_normal_squared,
    num_band0,
    num_band,
    g_pos,
    num_g_pos,
    frequencies,
    eigenvectors,
    triplet,
    bzgrid,
    fc3,
    is_compact_fc3,
    shortest_vectors,
    svecs_dims,
    multiplicity,
    masses,
    p2s_map,
    s2p_map,
    band_indices,
    symmetrize_fc3_q,
    cutoff_frequency,
    0,
    0,
    1 - openmp_per_triplets);

  ise_imag_self_energy_at_triplet(
    ise,
    num_band0,
    num_band,
    fc3_normal_squared,
    frequencies,
    triplet,
    triplet_weight,
    g,                      /* first weight block */
    g + num_band_prod,      /* second weight block */
    g_pos,
    num_g_pos,
    temperatures,
    num_temps,
    cutoff_frequency,
    1 - openmp_per_triplets,
    0);

  free(fc3_normal_squared);
  fc3_normal_squared = NULL;
  free(g_pos);
  g_pos = NULL;
}

/* Sum per-triplet contributions into the output.  When is_NU != 0 the output
 * holds two (num_temps x num_band0) blocks and each triplet is routed into
 * the first or second block depending on tpl_is_N (presumably normal vs
 * umklapp processes — confirm against tpl_is_N). */
static void finalize_ise(double *imag_self_energy,
                         const double *ise,
                         const long (*bz_grid_addresses)[3],
                         const long (*triplets)[3],
                         const long num_triplets,
                         const long num_temps,
                         const long num_band0,
                         const long is_NU)
{
  long i, j, k;
  long is_N;

  if (is_NU) {
    for (i = 0; i < 2 * num_temps * num_band0; i++) {
      imag_self_energy[i] = 0;
    }
    for (i = 0; i < num_triplets; i++) {
      is_N = tpl_is_N(triplets[i], bz_grid_addresses);
      for (j = 0; j < num_temps; j++) {
        for (k = 0; k < num_band0; k++) {
          if (is_N) {
            imag_self_energy[j * num_band0 + k] +=
              ise[i * num_temps * num_band0 + j * num_band0 + k];
          } else {
            imag_self_energy[num_temps * num_band0 + j * num_band0 + k] +=
              ise[i * num_temps * num_band0 + j * num_band0 + k];
          }
        }
      }
    }
  } else {
    for (i = 0; i < num_temps * num_band0; i++) {
      imag_self_energy[i] = 0;
    }
    for (i = 0; i < num_triplets; i++) {
      for (j = 0; j < num_temps; j++) {
        for (k = 0; k < num_band0; k++) {
          imag_self_energy[j * num_band0 + k] +=
            ise[i * num_temps * num_band0 + j * num_band0 + k];
        }
      }
    }
  }
}
GB_unop__asinh_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__asinh_fp32_fp32)
// op(A') function:  GB (_unop_tran__asinh_fp32_fp32)

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = asinhf (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = asinhf (x) ;

// casting (no-op: A and C are both float)
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    float aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ; \
    Cx [pC] = asinhf (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ASINH || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply asinhf to all anz entries of Ax, writing into Cx.  Ab selects
// present entries when A is bitmap; Ab == NULL means every entry is present.

GrB_Info GB (_unop_apply__asinh_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = asinhf (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   // skip entries not present in the bitmap
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = asinhf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The function body is supplied by the shared template GB_unop_transpose.c,
// which is specialized by the GB_* macros defined above.

GrB_Info GB (_unop_tran__asinh_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
pralel.c
#include <omp.h>
#include <stdio.h>

#define N 100

float A[N], B[N], pe;

/* Accumulate the dot product of the global arrays A and B into the global
 * accumulator `pe`.
 *
 * The `omp for` here is an *orphaned* worksharing construct: when AporB() is
 * called from inside a `parallel` region (as main does), the N iterations are
 * split among the team's threads and the per-thread partial sums are combined
 * by reduction(+:pe). Called outside a parallel region it simply runs the
 * whole loop serially. */
void AporB()
{
    int j;
#pragma omp for reduction(+:pe)
    for (j = 0; j < N; j++)
        pe += A[j] * B[j];
}

/* FIX: was `main ()` with implicit int, which is invalid in C99 and later
 * (and in C++); also added the explicit return. */
int main(void)
{
    int i;

    /* Initialize the operands: A = 0, 1, ..., N-1 and B = N, N-1, ..., 1,
     * so the expected dot product is sum_{i=0}^{N-1} i*(N-i) = 166650. */
    for (i = 0; i < N; i++) {
        A[i] = i;
        B[i] = N - i;
    }

    pe = 0.0;

#pragma omp parallel
    {
        AporB();   /* every thread enters; the orphaned `omp for` shares the work */
    }

    printf("\n\n >> PE = %10.0f\n\n", pe);
    return 0;
}
GB_binop__ne_uint32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__ne_uint32
// A.*B function (eWiseMult):       GB_AemultB__ne_uint32
// A*D function (colscale):         GB_AxD__ne_uint32
// D*A function (rowscale):         GB_DxB__ne_uint32
// C+=B function (dense accum):     GB_Cdense_accumB__ne_uint32
// C+=b function (dense accum):     GB_Cdense_accumb__ne_uint32
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__ne_uint32
// C=scalar+B                       GB_bind1st__ne_uint32
// C=scalar+B'                      GB_bind1st_tran__ne_uint32
// C=A+scalar                       GB_bind2nd__ne_uint32
// C=A'+scalar                      GB_bind2nd_tran__ne_uint32

// C type:   bool
// A type:   uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij != bij)

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint32_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator: z = (x != y)
#define GB_BINOP(z, x, y, i, j) \
    z = (x != y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_NE || GxB_NO_UINT32 || GxB_NO_NE_UINT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// NE has no dense-accum form, so this variant is compiled out (#if 0).

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__ne_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// NE cannot be used as an accumulator, so the template is compiled out and
// this function is a no-op that returns GrB_SUCCESS.

GrB_Info GB_Cdense_accumB__ne_uint32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// Likewise compiled out for the NE operator.

GrB_Info GB_Cdense_accumb__ne_uint32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__ne_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__ne_uint32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// Frees the ek_slice workspaces declared inside GB_AaddB/GB_AemultB below.

#undef  GB_FREE_ALL
#define GB_FREE_ALL \
{ \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}

GrB_Info GB_AaddB__ne_uint32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__ne_uint32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__ne_uint32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip positions not present in the bitmap (GBB is always true if
        // Bb is NULL, i.e. B is full/sparse)
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = Bx [p] ;
        Cx [p] = (x != bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__ne_uint32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = Ax [p] ;
        Cx [p] = (aij != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint32_t aij = Ax [pA] ; \
    Cx [pC] = (x != aij) ; \
}

GrB_Info GB_bind1st_tran__ne_uint32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint32_t aij = Ax [pA] ; \
    Cx [pC] = (aij != y) ; \
}

GrB_Info GB_bind2nd_tran__ne_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
omp_parallel_for_ordered.c
// RUN: %libomp-compile-and-run // REQUIRES: !(abt && (clang || gcc)) #include <stdio.h> #include "omp_testsuite.h" static int last_i = 0; int i; #pragma omp threadprivate(i) /* Variable ii is used to avoid problems with a threadprivate variable used as a loop * index. See test omp_threadprivate_for. */ static int ii; #pragma omp threadprivate(ii) /*! Utility function: returns true if the passed argument is larger than the argument of the last call of this function. */ static int check_i_islarger2(int i) { int islarger; islarger = (i > last_i); last_i = i; return (islarger); } int test_omp_parallel_for_ordered() { int sum; int is_larger; int known_sum; int i; sum = 0; is_larger = 1; last_i = 0; #pragma omp parallel for schedule(static,1) private(i) ordered for (i = 1; i < 100; i++) { ii = i; #pragma omp ordered { is_larger = check_i_islarger2 (ii) && is_larger; sum = sum + ii; } } known_sum = (99 * 100) / 2; fprintf (stderr," known_sum = %d , sum = %d \n", known_sum, sum); fprintf (stderr," is_larger = %d\n", is_larger); return (known_sum == sum) && is_larger; } int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_parallel_for_ordered()) { num_failed++; } } return num_failed; }
simd_utils.h
/* * Project : SIMD_Utils * Version : 0.1.12 * Author : JishinMaster * Licence : BSD-2 */ #pragma once #ifdef __cplusplus extern "C" { #endif #define MAJOR_VERSION 0 #define MINOR_VERSION 1 #define SUB_VERSION 11 #ifdef OMP #include <omp.h> #endif #include <math.h> #include <stdint.h> static const float FOPI = 1.27323954473516f; static const float PIO4F = 0.7853981633974483096f; /* Note, these constants are for a 32-bit significand: */ /* static const float DP1 = 0.7853851318359375f; static const float DP2 = 1.30315311253070831298828125e-5f; static const float DP3 = 3.03855025325309630e-11f; static const float lossth = 65536.f; */ /* These are for a 24-bit significand: */ static const float minus_cephes_DP1 = -0.78515625f; static const float minus_cephes_DP2 = -2.4187564849853515625e-4f; static const float minus_cephes_DP3 = -3.77489497744594108e-8f; static float lossth = 8192.; static const float T24M1 = 16777215.f; static const float sincof[] = {-1.9515295891E-4f, 8.3321608736E-3f, -1.6666654611E-1f}; static const float coscof[] = {2.443315711809948E-5f, -1.388731625493765E-3f, 4.166664568298827E-2f}; #define SIGN_MASK 0x80000000 static const int32_t sign_mask = SIGN_MASK; static const int32_t inv_sign_mask = ~SIGN_MASK; #include "mysincosf.h" #define INVLN10 0.4342944819032518f //0.4342944819f #define INVLN2 1.4426950408889634f //1.44269504089f #define IMM8_FLIP_VEC 0x1B // change m128 from abcd to dcba #define IMM8_LO_HI_VEC 0x1E // change m128 from abcd to cdab #define IMM8_PERMUTE_128BITS_LANES 0x1 // reverse abcd efgh to efgh abcd #define M_PI 3.14159265358979323846 typedef struct { int16_t re; int16_t im; } complex16s_t; typedef struct { int32_t re; int32_t im; } complex32s_t; typedef struct { float re; float im; } complex32_t; typedef struct { double re; double im; } complex64_t; typedef enum { RndZero, RndNear, RndFinancial, } FloatRoundingMode; /* if the user insures that all of their pointers are aligned, * they can use ALWAYS_ALIGNED to hope for some 
minor speedup on small vectors */ static inline int isAligned(uintptr_t ptr, size_t alignment) { #ifndef ALWAYS_ALIGNED if (((uintptr_t) (ptr) % alignment) == 0) return 1; return 0; #else return 1; #endif } static inline int areAligned2(uintptr_t ptr1, uintptr_t ptr2, size_t alignment) { #ifndef ALWAYS_ALIGNED if (((uintptr_t) (ptr1) % alignment) == 0) if (((uintptr_t) (ptr2) % alignment) == 0) return 1; return 0; #else return 1; #endif } static inline int areAligned3(uintptr_t ptr1, uintptr_t ptr2, uintptr_t ptr3, size_t alignment) { #ifndef ALWAYS_ALIGNED if (((uintptr_t) (ptr1) % alignment) == 0) if (((uintptr_t) (ptr2) % alignment) == 0) if (((uintptr_t) (ptr3) % alignment) == 0) return 1; return 0; #else return 1; #endif } static inline void simd_utils_get_version(void) { printf("Simd Utils Version : %d.%d.%d\n", MAJOR_VERSION, MINOR_VERSION, SUB_VERSION); } #ifndef RISCV #ifdef SSE #define SSE_LEN_BYTES 16 // Size of SSE lane #define SSE_LEN_INT16 8 // number of int16 with an SSE lane #define SSE_LEN_INT32 4 // number of int32 with an SSE lane #define SSE_LEN_FLOAT 4 // number of float with an SSE lane #define SSE_LEN_DOUBLE 2 // number of double with an SSE lane #ifndef ARM #include "sse_mathfun.h" #else /* ARM */ #include "neon_mathfun.h" #define _PS_CONST(Name, Val) \ static const ALIGN16_BEG float _ps_##Name[4] ALIGN16_END = {Val, Val, Val, Val} #define _PI32_CONST(Name, Val) \ static const ALIGN16_BEG int _pi32_##Name[4] ALIGN16_END = {Val, Val, Val, Val} #define _PS_CONST_TYPE(Name, Type, Val) \ static const ALIGN16_BEG Type _ps_##Name[4] ALIGN16_END = {Val, Val, Val, Val} #endif /* ARM */ static inline __m128 _mm_fmadd_ps_custom(__m128 a, __m128 b, __m128 c) { #ifndef FMA //Haswell comes with avx2 and fma return _mm_add_ps(_mm_mul_ps(a, b), c); #else /* FMA */ return _mm_fmadd_ps(a, b, c); #endif /* FMA */ } static inline __m128 _mm_fnmadd_ps_custom(__m128 a, __m128 b, __m128 c) { #ifndef FMA //Haswell comes with avx2 and fma return _mm_sub_ps(c, 
_mm_mul_ps(a, b)); #else /* FMA */ return _mm_fnmadd_ps(a, b, c); #endif /* FMA */ } static inline __m128d _mm_fmadd_pd_custom(__m128d a, __m128d b, __m128d c) { #ifndef FMA //Haswell comes with avx2 and fma return _mm_add_pd(_mm_mul_pd(a, b), c); #else /* FMA */ return _mm_fmadd_pd(a, b, c); #endif /* FMA */ } static inline __m128d _mm_fnmadd_pd_custom(__m128d a, __m128d b, __m128d c) { #ifndef FMA //Haswell comes with avx2 and fma return _mm_sub_pd(c, _mm_mul_pd(a, b)); #else /* FMA */ return _mm_fnmadd_pd(a, b, c); #endif /* FMA */ } #define _PD_CONST(Name, Val) \ static const ALIGN16_BEG double _pd_##Name[2] ALIGN16_END = {Val, Val} #define _PI64_CONST(Name, Val) \ static const ALIGN16_BEG int64_t _pi64_##Name[2] ALIGN16_END = {Val, Val} #define _PD_CONST_TYPE(Name, Type, Val) \ static const ALIGN16_BEG Type _pd_##Name[2] ALIGN16_END = {Val, Val} /* _PD_CONST_TYPE(min_norm_pos, int64_t, 0x380ffff83ce549caL); _PD_CONST_TYPE(mant_mask, int64_t, 0xFFFFFFFFFFFFFL); _PD_CONST_TYPE(inv_mant_mask, int64_t, ~0xFFFFFFFFFFFFFL); _PD_CONST_TYPE(sign_mask, int64_t, (int64_t) 0x8000000000000000L); _PD_CONST_TYPE(inv_sign_mask, int64_t, ~0x8000000000000000L); */ #ifdef ARM _PS_CONST(1, 1.0f); _PS_CONST(0p5, 0.5f); /* the smallest non denormalized float number */ _PS_CONST_TYPE(min_norm_pos, int, 0x00800000); _PS_CONST_TYPE(mant_mask, int, 0x7f800000); _PS_CONST_TYPE(inv_mant_mask, int, ~0x7f800000); _PS_CONST_TYPE(sign_mask, int, (int) 0x80000000); _PS_CONST_TYPE(inv_sign_mask, int, ~0x80000000); _PI32_CONST(1, 1); _PI32_CONST(inv1, ~1); _PI32_CONST(2, 2); _PI32_CONST(4, 4); _PI32_CONST(0x7f, 0x7f); _PS_CONST(cephes_SQRTHF, 0.707106781186547524); _PS_CONST(cephes_log_p0, 7.0376836292E-2); _PS_CONST(cephes_log_p1, -1.1514610310E-1); _PS_CONST(cephes_log_p2, 1.1676998740E-1); _PS_CONST(cephes_log_p3, -1.2420140846E-1); _PS_CONST(cephes_log_p4, +1.4249322787E-1); _PS_CONST(cephes_log_p5, -1.6668057665E-1); _PS_CONST(cephes_log_p6, +2.0000714765E-1); _PS_CONST(cephes_log_p7, 
-2.4999993993E-1); _PS_CONST(cephes_log_p8, +3.3333331174E-1); _PS_CONST(cephes_log_q1, -2.12194440e-4); _PS_CONST(cephes_log_q2, 0.693359375); _PS_CONST(exp_hi, 88.3762626647949f); _PS_CONST(exp_lo, -88.3762626647949f); _PS_CONST(cephes_LOG2EF, 1.44269504088896341); _PS_CONST(cephes_exp_C1, 0.693359375); _PS_CONST(cephes_exp_C2, -2.12194440e-4); _PS_CONST(cephes_exp_p0, 1.9875691500E-4); _PS_CONST(cephes_exp_p1, 1.3981999507E-3); _PS_CONST(cephes_exp_p2, 8.3334519073E-3); _PS_CONST(cephes_exp_p3, 4.1665795894E-2); _PS_CONST(cephes_exp_p4, 1.6666665459E-1); _PS_CONST(cephes_exp_p5, 5.0000001201E-1); _PS_CONST(minus_cephes_DP1, -0.78515625); _PS_CONST(minus_cephes_DP2, -2.4187564849853515625e-4); _PS_CONST(minus_cephes_DP3, -3.77489497744594108e-8); _PS_CONST(sincof_p0, -1.9515295891E-4); _PS_CONST(sincof_p1, 8.3321608736E-3); _PS_CONST(sincof_p2, -1.6666654611E-1); _PS_CONST(coscof_p0, 2.443315711809948E-005); _PS_CONST(coscof_p1, -1.388731625493765E-003); _PS_CONST(coscof_p2, 4.166664568298827E-002); _PS_CONST(cephes_FOPI, 1.27323954473516); // 4 / M_PI #endif /* ARM */ /* the smallest non denormalized double number */ /*_PD_CONST_TYPE(min_norm_pos, int64_t, 0x00800000); _PD_CONST_TYPE(mant_mask, int64_t, 0x7f800000); _PD_CONST_TYPE(inv_mant_mask, int64_t, ~0x7f800000); _PD_CONST_TYPE(sign_mask, int64_t, (int64_t) 0x80000000); _PD_CONST_TYPE(inv_sign_mask, int64_t, ~0x80000000);*/ _PD_CONST_TYPE(min_norm_pos, int64_t, 0x380ffff83ce549caL); _PD_CONST_TYPE(mant_mask, int64_t, 0xFFFFFFFFFFFFFL); _PD_CONST_TYPE(inv_mant_mask, int64_t, ~0xFFFFFFFFFFFFFL); _PD_CONST_TYPE(sign_mask, int64_t, (int64_t) 0x8000000000000000L); _PD_CONST_TYPE(inv_sign_mask, int64_t, ~0x8000000000000000L); _PD_CONST(minus_cephes_DP1, -7.85398125648498535156E-1); _PD_CONST(minus_cephes_DP2, -3.77489470793079817668E-8); _PD_CONST(minus_cephes_DP3, -2.69515142907905952645E-15); _PD_CONST(sincof_p0, 1.58962301576546568060E-10); _PD_CONST(sincof_p1, -2.50507477628578072866E-8); _PD_CONST(sincof_p2, 
2.75573136213857245213E-6); _PD_CONST(sincof_p3, -1.98412698295895385996E-4); _PD_CONST(sincof_p4, 8.33333333332211858878E-3); _PD_CONST(sincof_p5, -1.66666666666666307295E-1); _PD_CONST(coscof_p0, -1.13585365213876817300E-11); _PD_CONST(coscof_p1, 2.08757008419747316778E-9); _PD_CONST(coscof_p2, -2.75573141792967388112E-7); _PD_CONST(coscof_p3, 2.48015872888517045348E-5); _PD_CONST(coscof_p4, -1.38888888888730564116E-3); _PD_CONST(coscof_p5, 4.16666666666665929218E-2); _PD_CONST(cephes_FOPI, 1.2732395447351626861510701069801148); // 4 / M_PI _PD_CONST(1, 1.0); _PD_CONST(2, 2.0); _PD_CONST(0p5, 0.5); _PI64_CONST(1, 1); _PI64_CONST(inv1, ~1); _PI64_CONST(2, 2); _PI64_CONST(4, 4); _PI64_CONST(0x7f, 0x7f); _PD_CONST(cephes_SQRTHF, 0.70710678118654752440); _PD_CONST(cephes_log_p0, 1.01875663804580931796E-4); _PD_CONST(cephes_log_p1, -4.97494994976747001425E-1); _PD_CONST(cephes_log_p2, 4.70579119878881725854E0); _PD_CONST(cephes_log_p3, -1.44989225341610930846E1); _PD_CONST(cephes_log_p4, +1.79368678507819816313E1); _PD_CONST(cephes_log_p5, -7.70838733755885391666E0); _PD_CONST(cephes_log_q1, -1.12873587189167450590E1); _PD_CONST(cephes_log_q2, 4.52279145837532221105E1); _PD_CONST(cephes_log_q3, -8.29875266912776603211E1); _PD_CONST(cephes_log_q4, 7.11544750618563894466E1); _PD_CONST(cephes_log_q5, 4.52279145837532221105E1); _PD_CONST(cephes_log_q6, -2.31251620126765340583E1); _PD_CONST(exp_hi, 709.437); _PD_CONST(exp_lo, -709.436139303); _PD_CONST(cephes_LOG2EF, 1.4426950408889634073599); _PD_CONST(cephes_exp_p0, 1.26177193074810590878e-4); _PD_CONST(cephes_exp_p1, 3.02994407707441961300e-2); _PD_CONST(cephes_exp_p2, 9.99999999999999999910e-1); _PD_CONST(cephes_exp_q0, 3.00198505138664455042e-6); _PD_CONST(cephes_exp_q1, 2.52448340349684104192e-3); _PD_CONST(cephes_exp_q2, 2.27265548208155028766e-1); _PD_CONST(cephes_exp_q3, 2.00000000000000000009e0); _PD_CONST(cephes_exp_C1, 0.693145751953125); _PD_CONST(cephes_exp_C2, 1.42860682030941723212e-6); 
_PD_CONST_TYPE(positive_mask, int64_t, (int64_t) 0x7FFFFFFFFFFFFFFFL); _PD_CONST_TYPE(negative_mask, int64_t, (int64_t) ~0x7FFFFFFFFFFFFFFFL); _PD_CONST(ASIN_P0, 4.253011369004428248960E-3); _PD_CONST(ASIN_P1, -6.019598008014123785661E-1); _PD_CONST(ASIN_P2, 5.444622390564711410273E0); _PD_CONST(ASIN_P3, -1.626247967210700244449E1); _PD_CONST(ASIN_P4, 1.956261983317594739197E1); _PD_CONST(ASIN_P5, -8.198089802484824371615E0); _PD_CONST(ASIN_Q0, -1.474091372988853791896E1); _PD_CONST(ASIN_Q1, 7.049610280856842141659E1); _PD_CONST(ASIN_Q2, -1.471791292232726029859E2); _PD_CONST(ASIN_Q3, 1.395105614657485689735E2); _PD_CONST(ASIN_Q4, -4.918853881490881290097E1); _PD_CONST(ASIN_R0, 2.967721961301243206100E-3); _PD_CONST(ASIN_R1, -5.634242780008963776856E-1); _PD_CONST(ASIN_R2, 6.968710824104713396794E0); _PD_CONST(ASIN_R3, -2.556901049652824852289E1); _PD_CONST(ASIN_R4, 2.853665548261061424989E1); _PD_CONST(ASIN_S0, -2.194779531642920639778E1); _PD_CONST(ASIN_S1, 1.470656354026814941758E2); _PD_CONST(ASIN_S2, -3.838770957603691357202E2); _PD_CONST(ASIN_S3, 3.424398657913078477438E2); _PD_CONST(PIO2, 1.57079632679489661923); /* pi/2 */ _PD_CONST(PIO4, 7.85398163397448309616E-1); /* pi/4 */ _PD_CONST(minMOREBITS, -6.123233995736765886130E-17); _PD_CONST(MOREBITS, 6.123233995736765886130E-17); _PD_CONST(ATAN_P0, -8.750608600031904122785E-1); _PD_CONST(ATAN_P1, -1.615753718733365076637E1); _PD_CONST(ATAN_P2, -7.500855792314704667340E1); _PD_CONST(ATAN_P3, -1.228866684490136173410E2); _PD_CONST(ATAN_P4, -6.485021904942025371773E1); _PD_CONST(ATAN_Q0, 2.485846490142306297962E1); _PD_CONST(ATAN_Q1, 1.650270098316988542046E2); _PD_CONST(ATAN_Q2, 4.328810604912902668951E2); _PD_CONST(ATAN_Q3, 4.853903996359136964868E2); _PD_CONST(ATAN_Q4, 1.945506571482613964425E2); _PD_CONST(TAN3PI8, 2.41421356237309504880); /* 3*pi/8 */ _PD_CONST(min1, -1.0); #include "simd_utils_sse_double.h" #include "simd_utils_sse_float.h" #include "simd_utils_sse_int32.h" #endif /* SSE */ #ifdef AVX 
#ifndef __clang__ #ifndef __INTEL_COMPILER #ifndef __cplusplus // TODO : it seems to be defined with G++ 9.2 and not GCC 9.2 static inline __m256 _mm256_set_m128(__m128 H, __m128 L) //not present on every GCC version { return _mm256_insertf128_ps(_mm256_castps128_ps256(L), H, 1); } #endif #endif #endif /* __clang__ */ #define AVX_LEN_BYTES 32 // Size of AVX lane #define AVX_LEN_INT32 8 // number of int32 with an AVX lane #define AVX_LEN_FLOAT 8 // number of float with an AVX lane #define AVX_LEN_DOUBLE 4 // number of double with an AVX lane static inline __m256 _mm256_fmadd_ps_custom(__m256 a, __m256 b, __m256 c) { #ifndef FMA //Haswell comes with avx2 and fma return _mm256_add_ps(_mm256_mul_ps(a, b), c); #else /* FMA */ return _mm256_fmadd_ps(a, b, c); #endif /* FMA */ } static inline __m256 _mm256_fnmadd_ps_custom(__m256 a, __m256 b, __m256 c) { #ifndef FMA //Haswell comes with avx2 and fma return _mm256_sub_ps(c, _mm256_mul_ps(a, b)); #else /* FMA */ return _mm256_fnmadd_ps(a, b, c); #endif /* FMA */ } static inline __m256d _mm256_fmadd_pd_custom(__m256d a, __m256d b, __m256d c) { #ifndef FMA //Haswell comes with avx2 and fma return _mm256_add_pd(_mm256_mul_pd(a, b), c); #else /* FMA */ return _mm256_fmadd_pd(a, b, c); #endif /* FMA */ } static inline __m256d _mm256_fnmadd_pd_custom(__m256d a, __m256d b, __m256d c) { #ifndef FMA //Haswell comes with avx2 and fma return _mm256_sub_pd(c, _mm256_mul_pd(a, b)); #else /* FMA */ return _mm256_fnmadd_pd(a, b, c); #endif /* FMA */ } #include "avx_mathfun.h" #define _PD256_CONST(Name, Val) \ static const ALIGN32_BEG double _pd256_##Name[4] ALIGN32_END = {Val, Val, Val, Val} #define _PI256_64_CONST(Name, Val) \ static const ALIGN32_BEG int64_t _pi256_64_##Name[4] ALIGN32_END = {Val, Val, Val, Val} #define _PD256_CONST_TYPE(Name, Type, Val) \ static const ALIGN32_BEG Type _pd256_##Name[4] ALIGN32_END = {Val, Val, Val, Val} _PD256_CONST_TYPE(min_norm_pos, int64_t, 0x380ffff83ce549caL); _PD256_CONST_TYPE(mant_mask, int64_t, 
0xFFFFFFFFFFFFFL); _PD256_CONST_TYPE(inv_mant_mask, int64_t, ~0xFFFFFFFFFFFFFL); _PD256_CONST_TYPE(sign_mask, int64_t, (int64_t) 0x8000000000000000L); _PD256_CONST_TYPE(inv_sign_mask, int64_t, ~0x8000000000000000L); _PD256_CONST(minus_cephes_DP1, -7.85398125648498535156E-1); _PD256_CONST(minus_cephes_DP2, -3.77489470793079817668E-8); _PD256_CONST(minus_cephes_DP3, -2.69515142907905952645E-15); _PD256_CONST(sincof_p0, 1.58962301576546568060E-10); _PD256_CONST(sincof_p1, -2.50507477628578072866E-8); _PD256_CONST(sincof_p2, 2.75573136213857245213E-6); _PD256_CONST(sincof_p3, -1.98412698295895385996E-4); _PD256_CONST(sincof_p4, 8.33333333332211858878E-3); _PD256_CONST(sincof_p5, -1.66666666666666307295E-1); _PD256_CONST(coscof_p0, -1.13585365213876817300E-11); _PD256_CONST(coscof_p1, 2.08757008419747316778E-9); _PD256_CONST(coscof_p2, -2.75573141792967388112E-7); _PD256_CONST(coscof_p3, 2.48015872888517045348E-5); _PD256_CONST(coscof_p4, -1.38888888888730564116E-3); _PD256_CONST(coscof_p5, 4.16666666666665929218E-2); _PD256_CONST(cephes_FOPI, 1.2732395447351626861510701069801148); // 4 / M_PI _PD256_CONST_TYPE(positive_mask, int64_t, (int64_t) 0x7FFFFFFFFFFFFFFFL); _PD256_CONST_TYPE(negative_mask, int64_t, (int64_t) ~0x7FFFFFFFFFFFFFFFL); _PD256_CONST(ASIN_P0, 4.253011369004428248960E-3); _PD256_CONST(ASIN_P1, -6.019598008014123785661E-1); _PD256_CONST(ASIN_P2, 5.444622390564711410273E0); _PD256_CONST(ASIN_P3, -1.626247967210700244449E1); _PD256_CONST(ASIN_P4, 1.956261983317594739197E1); _PD256_CONST(ASIN_P5, -8.198089802484824371615E0); _PD256_CONST(ASIN_Q0, -1.474091372988853791896E1); _PD256_CONST(ASIN_Q1, 7.049610280856842141659E1); _PD256_CONST(ASIN_Q2, -1.471791292232726029859E2); _PD256_CONST(ASIN_Q3, 1.395105614657485689735E2); _PD256_CONST(ASIN_Q4, -4.918853881490881290097E1); _PD256_CONST(ASIN_R0, 2.967721961301243206100E-3); _PD256_CONST(ASIN_R1, -5.634242780008963776856E-1); _PD256_CONST(ASIN_R2, 6.968710824104713396794E0); _PD256_CONST(ASIN_R3, 
-2.556901049652824852289E1); _PD256_CONST(ASIN_R4, 2.853665548261061424989E1); _PD256_CONST(ASIN_S0, -2.194779531642920639778E1); _PD256_CONST(ASIN_S1, 1.470656354026814941758E2); _PD256_CONST(ASIN_S2, -3.838770957603691357202E2); _PD256_CONST(ASIN_S3, 3.424398657913078477438E2); _PD256_CONST(PIO2, 1.57079632679489661923); /* pi/2 */ _PD256_CONST(PIO4, 7.85398163397448309616E-1); /* pi/4 */ _PD256_CONST(minMOREBITS, -6.123233995736765886130E-17); _PD256_CONST(MOREBITS, 6.123233995736765886130E-17); _PD256_CONST(ATAN_P0, -8.750608600031904122785E-1); _PD256_CONST(ATAN_P1, -1.615753718733365076637E1); _PD256_CONST(ATAN_P2, -7.500855792314704667340E1); _PD256_CONST(ATAN_P3, -1.228866684490136173410E2); _PD256_CONST(ATAN_P4, -6.485021904942025371773E1); _PD256_CONST(ATAN_Q0, 2.485846490142306297962E1); _PD256_CONST(ATAN_Q1, 1.650270098316988542046E2); _PD256_CONST(ATAN_Q2, 4.328810604912902668951E2); _PD256_CONST(ATAN_Q3, 4.853903996359136964868E2); _PD256_CONST(ATAN_Q4, 1.945506571482613964425E2); _PD256_CONST(TAN3PI8, 2.41421356237309504880); /* 3*pi/8 */ _PD256_CONST(min1, -1.0); _PD256_CONST(1, 1.0); _PD256_CONST(2, 2.0); _PD256_CONST(0p5, 0.5); _PI256_64_CONST(1, 1); _PI256_64_CONST(inv1, ~1); _PI256_64_CONST(2, 2); _PI256_64_CONST(4, 4); _PI256_64_CONST(0x7f, 0x7f); #include "simd_utils_avx_double.h" #include "simd_utils_avx_float.h" #include "simd_utils_avx_int32.h" #endif /* AVX */ #ifdef AVX512 #define AVX512_LEN_BYTES 64 // Size of AVX512 lane #define AVX512_LEN_INT32 16 // number of int32 with an AVX512 lane #define AVX512_LEN_FLOAT 16 // number of float with an AVX512 lane #define AVX512_LEN_DOUBLE 8 // number of double with an AVX512 lane static inline __m512 _mm512_fmadd_ps_custom(__m512 a, __m512 b, __m512 c) { #ifndef FMA return _mm512_add_ps(_mm512_mul_ps(a, b), c); #else /* FMA */ return _mm512_fmadd_ps(a, b, c); #endif /* FMA */ } static inline __m512d _mm512_fmadd_pd_custom(__m512d a, __m512d b, __m512d c) { #ifndef FMA //Haswell comes with avx2 and 
fma return _mm512_add_pd(_mm512_mul_pd(a, b), c); #else /* FMA */ return _mm512_fmadd_pd(a, b, c); #endif /* FMA */ } static inline __m512d _mm512_fnmadd_pd_custom(__m512d a, __m512d b, __m512d c) { #ifndef FMA //Haswell comes with avx2 and fma return _mm512_sub_pd(c, _mm512_mul_pd(a, b)); #else /* FMA */ return _mm512_fnmadd_pd(a, b, c); #endif /* FMA */ } #include "avx512_mathfun.h" #define _PD512_CONST(Name, Val) \ static const ALIGN64_BEG double _pd512_##Name[8] ALIGN64_END = {Val, Val, Val, Val, Val, Val, Val, Val} #define _PI512_64_CONST(Name, Val) \ static const ALIGN64_BEG int64_t _pi512_64_##Name[8] ALIGN64_END = {Val, Val, Val, Val, Val, Val, Val, Val} #define _PD512_CONST_TYPE(Name, Type, Val) \ static const ALIGN64_BEG Type _pd512_##Name[8] ALIGN64_END = {Val, Val, Val, Val, Val, Val, Val, Val} _PD512_CONST_TYPE(min_norm_pos, int64_t, 0x380ffff83ce549caL); _PD512_CONST_TYPE(mant_mask, int64_t, 0xFFFFFFFFFFFFFL); _PD512_CONST_TYPE(inv_mant_mask, int64_t, ~0xFFFFFFFFFFFFFL); _PD512_CONST_TYPE(sign_mask, int64_t, (int64_t) 0x8000000000000000L); _PD512_CONST_TYPE(inv_sign_mask, int64_t, ~0x8000000000000000L); _PD512_CONST(minus_cephes_DP1, -7.85398125648498535156E-1); _PD512_CONST(minus_cephes_DP2, -3.77489470793079817668E-8); _PD512_CONST(minus_cephes_DP3, -2.69515142907905952645E-15); _PD512_CONST(sincof_p0, 1.58962301576546568060E-10); _PD512_CONST(sincof_p1, -2.50507477628578072866E-8); _PD512_CONST(sincof_p2, 2.75573136213857245213E-6); _PD512_CONST(sincof_p3, -1.98412698295895385996E-4); _PD512_CONST(sincof_p4, 8.33333333332211858878E-3); _PD512_CONST(sincof_p5, -1.66666666666666307295E-1); _PD512_CONST(coscof_p0, -1.13585365213876817300E-11); _PD512_CONST(coscof_p1, 2.08757008419747316778E-9); _PD512_CONST(coscof_p2, -2.75573141792967388112E-7); _PD512_CONST(coscof_p3, 2.48015872888517045348E-5); _PD512_CONST(coscof_p4, -1.38888888888730564116E-3); _PD512_CONST(coscof_p5, 4.16666666666665929218E-2); _PD512_CONST(cephes_FOPI, 
1.2732395447351626861510701069801148); // 4 / M_PI _PD512_CONST_TYPE(positive_mask, int64_t, (int64_t) 0x7FFFFFFFFFFFFFFFL); _PD512_CONST_TYPE(negative_mask, int64_t, (int64_t) ~0x7FFFFFFFFFFFFFFFL); _PD512_CONST(ASIN_P0, 4.253011369004428248960E-3); _PD512_CONST(ASIN_P1, -6.019598008014123785661E-1); _PD512_CONST(ASIN_P2, 5.444622390564711410273E0); _PD512_CONST(ASIN_P3, -1.626247967210700244449E1); _PD512_CONST(ASIN_P4, 1.956261983317594739197E1); _PD512_CONST(ASIN_P5, -8.198089802484824371615E0); _PD512_CONST(ASIN_Q0, -1.474091372988853791896E1); _PD512_CONST(ASIN_Q1, 7.049610280856842141659E1); _PD512_CONST(ASIN_Q2, -1.471791292232726029859E2); _PD512_CONST(ASIN_Q3, 1.395105614657485689735E2); _PD512_CONST(ASIN_Q4, -4.918853881490881290097E1); _PD512_CONST(ASIN_R0, 2.967721961301243206100E-3); _PD512_CONST(ASIN_R1, -5.634242780008963776856E-1); _PD512_CONST(ASIN_R2, 6.968710824104713396794E0); _PD512_CONST(ASIN_R3, -2.556901049652824852289E1); _PD512_CONST(ASIN_R4, 2.853665548261061424989E1); _PD512_CONST(ASIN_S0, -2.194779531642920639778E1); _PD512_CONST(ASIN_S1, 1.470656354026814941758E2); _PD512_CONST(ASIN_S2, -3.838770957603691357202E2); _PD512_CONST(ASIN_S3, 3.424398657913078477438E2); _PD512_CONST(PIO2, 1.57079632679489661923); /* pi/2 */ _PD512_CONST(PIO4, 7.85398163397448309616E-1); /* pi/4 */ _PD512_CONST(minMOREBITS, -6.123233995736765886130E-17); _PD512_CONST(MOREBITS, 6.123233995736765886130E-17); _PD512_CONST(ATAN_P0, -8.750608600031904122785E-1); _PD512_CONST(ATAN_P1, -1.615753718733365076637E1); _PD512_CONST(ATAN_P2, -7.500855792314704667340E1); _PD512_CONST(ATAN_P3, -1.228866684490136173410E2); _PD512_CONST(ATAN_P4, -6.485021904942025371773E1); _PD512_CONST(ATAN_Q0, 2.485846490142306297962E1); _PD512_CONST(ATAN_Q1, 1.650270098316988542046E2); _PD512_CONST(ATAN_Q2, 4.328810604912902668951E2); _PD512_CONST(ATAN_Q3, 4.853903996359136964868E2); _PD512_CONST(ATAN_Q4, 1.945506571482613964425E2); _PD512_CONST(TAN3PI8, 2.41421356237309504880); /* 3*pi/8 
*/ _PD512_CONST(min1, -1.0); _PD512_CONST(1, 1.0); _PD512_CONST(2, 2.0); _PD512_CONST(0p5, 0.5); _PI512_64_CONST(1, 1); _PI512_64_CONST(inv1, ~1); _PI512_64_CONST(2, 2); _PI512_64_CONST(4, 4); _PI512_64_CONST(0x7f, 0x7f); #include "simd_utils_avx512_double.h" #include "simd_utils_avx512_float.h" #include "simd_utils_avx512_int32.h" #endif #ifdef ICC #include "simd_utils_svml.h" #endif #else /* RISCV */ #include "simd_utils_riscv.h" #endif /* RISCV */ #ifdef CUSTOM_MALLOC //Thanks to Jpommier pfft https://bitbucket.org/jpommier/pffft/src/default/pffft.c static inline int posix_memalign(void **pointer, size_t len, int alignement) { void *p, *p0 = malloc(len + alignement); if (!p0) return (void *) NULL; p = (void *) (((size_t) p0 + alignement) & (~((size_t) (alignement - 1)))); *((void **) p - 1) = p0; *pointer = p; return 0; } static inline void *aligned_malloc(size_t len, int alignement) { void *p, *p0 = malloc(len + alignement); if (!p0) return (void *) NULL; p = (void *) (((size_t) p0 + alignement) & (~((size_t) (alignement - 1)))); *((void **) p - 1) = p0; return p; } //Work in progress static inline void aligned_free(void *p) { if (p) free(*((void **) p - 1)); } #endif /* CUSTOM_MALLOC */ ////////// C Test functions //////////////// static inline void log10f_C(float *src, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) dst[i] = log10f(src[i]); } static inline void log2f_C(float *src, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) dst[i] = log2f(src[i]); } static inline void lnf_C(float *src, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) dst[i] = logf(src[i]); } static inline void expf_C(float *src, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = expf(src[i]); } } static inline void fabsf_C(float *src, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { 
dst[i] = fabsf(src[i]); } } static inline void setf_C(float *src, float value, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { src[i] = value; } } static inline void zerof_C(float *src, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { src[i] = 0.0f; } } static inline void copyf_C(float *src, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = src[i]; } } static inline void addcf_C(float *src, float value, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = src[i] + value; } } static inline void addcs_C(int32_t *src, int32_t value, int32_t *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = src[i] + value; } } static inline void mulf_C(float *src1, float *src2, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = src1[i] * src2[i]; } } static inline void mulcf_C(float *src, float value, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = src[i] * value; } } static inline void muladdf_C(float *_a, float *_b, float *_c, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = _a[i] * _b[i] + _c[i]; } } static inline void mulcaddf_C(float *_a, float _b, float *_c, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = _a[i] * _b + _c[i]; } } static inline void mulcaddcf_C(float *_a, float _b, float _c, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = _a[i] * _b + _c; } } static inline void muladdcf_C(float *_a, float *_b, float _c, float *dst, int len) { for (int i = 0; i < len; i++) { dst[i] = _a[i] * _b[i] + _c; } } static inline void muls_c(int32_t *a, int32_t *b, int32_t *c, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) 
{
        c[i] = a[i] * b[i];
    }
}

/* Element-wise float division: dst = src1 / src2.
   NOTE(review): no guard against src2[i] == 0.0f (result is inf/NaN per IEEE-754). */
static inline void divf_C(float *src1, float *src2, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
    for (int i = 0; i < len; i++) {
        dst[i] = src1[i] / src2[i];
    }
}

/* De-interleaves a complex buffer (re,im,re,im,...) of `len` complex
   elements into separate real and imaginary arrays. */
static inline void cplxtorealf_C(float *src, float *dstRe, float *dstIm, int len)
{
    int j = 0;
#ifdef OMP
#pragma omp simd
#endif
    for (int i = 0; i < 2 * len; i += 2) {
        dstRe[j] = src[i];
        dstIm[j] = src[i + 1];
        j++;
    }
}

/* Interleaves split real/imaginary arrays back into (re,im,...) pairs. */
static inline void realtocplx_C(float *srcRe, float *srcIm, float *dst, int len)
{
    int j = 0;
#ifdef OMP
#pragma omp simd
#endif
    for (int i = 0; i < len; i++) {
        dst[j] = srcRe[i];
        dst[j + 1] = srcIm[i];
        j += 2;
    }
}

/* Narrowing conversion double -> float. */
static inline void convert_64f32f_C(double *src, float *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
    for (int i = 0; i < len; i++) {
        dst[i] = (float) src[i];
    }
}

/* Widening conversion float -> double. */
static inline void convert_32f64f_C(float *src, double *dst, int len)
{
#ifdef OMP
#pragma omp simd
#endif
    for (int i = 0; i < len; i++) {
        dst[i] = (double) src[i];
    }
}

/* Converts float to uint8_t: input scaled by 1/2^scale_factor, then rounded
   per rounding_mode, saturating at 255.
   NOTE(review): there is no lower clamp -- casting a negative float to
   uint8_t is undefined behavior in C; confirm callers only pass
   non-negative inputs.
   NOTE(review): RndZero uses floorf (rounds toward -inf, not toward zero);
   verify that is the intended semantic for the mode's name.
   NOTE(review): the RndFinancial branch computes roundf(x * 0.5f) / 2.0f,
   i.e. roughly x/4, unlike the other modes which keep the magnitude of x;
   verify this matches the SIMD implementations. */
static inline void convertFloat32ToU8_C(float *src, uint8_t *dst, int len, int rounding_mode, int scale_factor)
{
    float scale_fact_mult = 1.0f / (float) (1 << scale_factor);
    if (rounding_mode == RndZero) {
#ifdef OMP
#pragma omp simd
#endif
        for (int i = 0; i < len; i++) {
            float tmp = floorf(src[i] * scale_fact_mult);
            dst[i] = (uint8_t) (tmp > 255.0f ? 255.0f : tmp);
        }
    } else if (rounding_mode == RndNear) {
#ifdef OMP
#pragma omp simd
#endif
        for (int i = 0; i < len; i++) {
            float tmp = roundf(src[i] * scale_fact_mult);
            dst[i] = (uint8_t) (tmp > 255.0f ? 255.0f : tmp);
        }
    } else if (rounding_mode == RndFinancial) {
#ifdef OMP
#pragma omp simd
#endif
        for (int i = 0; i < len; i++) {
            float tmp = (roundf(src[i] * scale_fact_mult * 0.5f) / 2.0f);
            dst[i] = (uint8_t) (tmp > 255.0f ? 255.0f : tmp);
        }
    } else { /* fallback: no rounding, truncation via the cast itself */
#ifdef OMP
#pragma omp simd
#endif
        for (int i = 0; i < len; i++) {
            float tmp = src[i] * scale_fact_mult;
            dst[i] = (uint8_t) (tmp > 255.0f ? 255.0f : tmp);
        }
    }
}

/* int16 -> float with scale: dst = src / 2^scale_factor. */
static inline void convertInt16ToFloat32_C(int16_t *src, float *dst, int len, int scale_factor)
{
    float scale_fact_mult = 1.0f / (float) (1 << scale_factor);
#ifdef OMP
#pragma omp simd
#endif
    for (int i = 0; i < len; i++) {
        dst[i] = (float) src[i] * scale_fact_mult;
    }
}

/* Clamp from above: dst = min(src, value). */
static inline void threshold_gt_f_C(float *src, float *dst, int len, float value)
{
#ifdef OMP
#pragma omp simd
#endif
    for (int i = 0; i < len; i++) {
        dst[i] = src[i] < value ? src[i] : value;
    }
}

/* Clamp the magnitude from above: |dst| = min(|src|, value), sign kept. */
static inline void threshold_gtabs_f_C(float *src, float *dst, int len, float value)
{
#ifdef OMP
#pragma omp simd
#endif
    for (int i = 0; i < len; i++) {
        if (src[i] >= 0.0f) {
            dst[i] = src[i] > value ? value : src[i];
        } else {
            dst[i] = src[i] < (-value) ? (-value) : src[i];
        }
    }
}

/* Clamp from below: dst = max(src, value). */
static inline void threshold_lt_f_C(float *src, float *dst, int len, float value)
{
#ifdef OMP
#pragma omp simd
#endif
    for (int i = 0; i < len; i++) {
        dst[i] = src[i] > value ? src[i] : value;
    }
}

/* Clamp the magnitude from below: |dst| = max(|src|, value), sign kept. */
static inline void threshold_ltabs_f_C(float *src, float *dst, int len, float value)
{
#ifdef OMP
#pragma omp simd
#endif
    for (int i = 0; i < len; i++) {
        if (src[i] >= 0.0f) {
            dst[i] = src[i] < value ? value : src[i];
        } else {
            dst[i] = src[i] > (-value) ? (-value) : src[i];
        }
    }
}

/* Two-sided replacement: values below ltlevel become ltvalue, then values
   above gtlevel become gtvalue. */
static inline void threshold_ltval_gtval_f_C(float *src, float *dst, int len, float ltlevel, float ltvalue, float gtlevel, float gtvalue)
{
#ifdef OMP
#pragma omp simd
#endif
    for (int i = 0; i < len; i++) {
        dst[i] = src[i] < ltlevel ? ltvalue : src[i];
        dst[i] = dst[i] > gtlevel ?
gtvalue : dst[i]; } } static inline void magnitudef_C_interleaved(complex32_t *src, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = sqrtf(src[i].re * src[i].re + src[i].im * src[i].im); } } static inline void magnitudef_C_split(float *srcRe, float *srcIm, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = sqrtf(srcRe[i] * srcRe[i] + srcIm[i] * srcIm[i]); } } static inline void powerspectf_C_split(float *srcRe, float *srcIm, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = srcRe[i] * srcRe[i] + srcIm[i] * srcIm[i]; } } static inline void meanf_C(float *src, float *dst, int len) { float acc = 0.0f; int i; #ifdef OMP #pragma omp simd reduction(+ \ : acc) #endif for (i = 0; i < len; i++) { acc += src[i]; } acc = acc / (float) len; *dst = acc; } static inline void sumf_C(float *src, float *dst, int len) { float tmp_acc = 0.0f; for (int i = 0; i < len; i++) { tmp_acc += src[i]; } *dst = tmp_acc; } static inline void flipf_C(float *src, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[len - i - 1] = src[i]; } } static inline void asinf_C(float *src, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = asinf(src[i]); } } static inline void asin_C(double *src, double *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = asin(src[i]); } } static inline void tanf_C(float *src, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = tanf(src[i]); } } static inline void tanhf_C(float *src, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = tanhf(src[i]); } } static inline void sinhf_C(float *src, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = sinhf(src[i]); } } 
static inline void coshf_C(float *src, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = coshf(src[i]); } } static inline void atanhf_C(float *src, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = atanhf(src[i]); } } static inline void asinhf_C(float *src, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = asinhf(src[i]); } } static inline void acoshf_C(float *src, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = acoshf(src[i]); } } static inline void atan_C(double *src, double *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = atan(src[i]); } } static inline void atanf_C(float *src, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = atanf(src[i]); } } static inline void atan2f_C(float *src1, float *src2, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = atan2f(src1[i], src2[i]); } } static inline void sinf_C(float *src, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = sinf(src[i]); } } static inline void cosf_C(float *src, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = cosf(src[i]); } } static inline void sincosf_C(float *src, float *dst_sin, float *dst_cos, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { mysincosf(src[i], dst_sin + i, dst_cos + i); } } static inline void sincosd_C(double *src, double *dst_sin, double *dst_cos, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst_sin[i] = sin(src[i]); dst_cos[i] = cos(src[i]); } } static inline void sqrtf_C(float *src, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = 
sqrtf(src[i]); } } static inline void floorf_C(float *src, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = floorf(src[i]); } } static inline void ceilf_C(float *src, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = ceilf(src[i]); } } static inline void roundf_C(float *src, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = roundf(src[i]); } } static inline void truncf_C(float *src, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = truncf(src[i]); } } static inline void floord_C(double *src, double *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = floor(src[i]); } } static inline void ceild_C(double *src, double *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = ceil(src[i]); } } static inline void roundd_C(double *src, double *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = round(src[i]); } } static inline void truncd_C(double *src, double *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = trunc(src[i]); } } static inline void cplxvecmul_C(complex32_t *src1, complex32_t *src2, complex32_t *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i].re = src1[i].re * src2[i].re - src1[i].im * src2[i].im; dst[i].im = src1[i].re * src2[i].im + src2[i].re * src1[i].im; } } static inline void cplxvecmul_C_split(float *src1Re, float *src1Im, float *src2Re, float *src2Im, float *dstRe, float *dstIm, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dstRe[i] = src1Re[i] * src2Re[i] - src1Im[i] * src2Im[i]; dstIm[i] = src1Re[i] * src2Im[i] + src2Re[i] * src1Im[i]; } } static inline void cplxconjvecmul_C(complex32_t *src1, complex32_t *src2, complex32_t 
*dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i].re = src1[i].re * src2[i].re + src1[i].im * src2[i].im; dst[i].im = src2[i].re * src1[i].im - src1[i].re * src2[i].im; } } static inline void cplxconjvecmul_C_split(float *src1Re, float *src1Im, float *src2Re, float *src2Im, float *dstRe, float *dstIm, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dstRe[i] = src1Re[i] * src2Re[i] + src1Im[i] * src2Im[i]; dstIm[i] = src2Re[i] * src1Im[i] - src1Re[i] * src2Im[i]; } } static inline void cplxconj_C(complex32_t *src, complex32_t *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i].re = src[i].re; dst[i].im = -src[i].im; } } static inline void vectorSlopef_C(float *dst, int len, float offset, float slope) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = (float) i * slope + offset; } } static inline void vectorSloped_C(double *dst, int len, double offset, double slope) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = (double) i * slope + offset; } } static inline void vectorSlopes_C(int32_t *dst, int len, int32_t offset, int32_t slope) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = (int32_t) i * slope + offset; } } static inline void maxeveryf_c(float *src1, float *src2, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = src1[i] > src2[i] ? src1[i] : src2[i]; } } static inline void mineveryf_c(float *src1, float *src2, float *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = src1[i] < src2[i] ? src1[i] : src2[i]; } } static inline void minmaxf_c(float *src, int len, float *min_value, float *max_value) { float min_tmp = src[0]; float max_tmp = src[0]; #ifdef OMP #pragma omp simd #endif for (int i = 1; i < len; i++) { max_tmp = max_tmp > src[i] ? 
max_tmp : src[i]; min_tmp = min_tmp < src[i] ? min_tmp : src[i]; } *max_value = max_tmp; *min_value = min_tmp; } static inline void addf_c(float *a, float *b, float *c, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { c[i] = a[i] + b[i]; } } static inline void adds_c(int32_t *a, int32_t *b, int32_t *c, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { c[i] = a[i] + b[i]; } } static inline void subf_c(float *a, float *b, float *c, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { c[i] = a[i] - b[i]; } } static inline void subcrevf_C(float *src, float value, float *dst, int len) { for (int i = 0; i < len; i++) { dst[i] = value - src[i]; } } static inline void subs_c(int32_t *a, int32_t *b, int32_t *c, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { c[i] = a[i] - b[i]; } } /*static inline void orf_c(float *a, float *b, float *c, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { c[i] = a[i] | b[i]; } }*/ static inline void setd_C(double *src, double value, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { src[i] = value; } } static inline void zerod_C(double *src, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { src[i] = 0.0; } } static inline void copyd_C(double *src, double *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = src[i]; } } static inline void copys_C(int32_t *src, int32_t *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = src[i]; } } static inline void sqrtd_C(double *src, double *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = sqrt(src[i]); } } static inline void addd_c(double *a, double *b, double *c, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { c[i] = a[i] + b[i]; } } static inline void 
muld_c(double *a, double *b, double *c, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { c[i] = a[i] * b[i]; } } static inline void subd_c(double *a, double *b, double *c, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { c[i] = a[i] - b[i]; } } static inline void divd_c(double *a, double *b, double *c, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { c[i] = a[i] / b[i]; } } static inline void mulcd_C(double *src, double value, double *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = src[i] * value; } } static inline void muladdd_C(double *_a, double *_b, double *_c, double *dst, int len) { for (int i = 0; i < len; i++) { dst[i] = _a[i] * _b[i] + _c[i]; } } static inline void mulcaddd_C(double *_a, double _b, double *_c, double *dst, int len) { for (int i = 0; i < len; i++) { dst[i] = _a[i] * _b + _c[i]; } } static inline void mulcaddcd_C(double *_a, double _b, double _c, double *dst, int len) { for (int i = 0; i < len; i++) { dst[i] = _a[i] * _b + _c; } } static inline void muladdcd_C(double *_a, double *_b, double _c, double *dst, int len) { for (int i = 0; i < len; i++) { dst[i] = _a[i] * _b[i] + _c; } } static inline void addcd_C(double *src, double value, double *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = src[i] + value; } } static inline void ors_C(int32_t *a, int32_t *b, int32_t *c, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { c[i] = a[i] | b[i]; } } /*static inline void andf_C(float *a, float *b, float *c, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { c[i] = a[i] & b[i]; } }*/ static inline void ands_C(int32_t *a, int32_t *b, int32_t *c, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { c[i] = a[i] & b[i]; } } static inline void sigmoidf_C(float *src, float *dst, int len) { #ifdef OMP 
#pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = 1.0f / (1.0f + expf(-src[i])); } } //parametric ReLU //simple ReLU can be expressed as threshold_lt with value = 0 static inline void PReluf_C(float *src, float *dst, float alpha, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { if (src[i] > 0.0f) dst[i] = src[i]; else dst[i] = alpha * src[i]; } } static inline void softmaxf_C(float *src, float *dst, int len) { float acc = 0.0f; #ifdef OMP #pragma omp simd reduction(+ \ : acc) #endif for (int i = 0; i < len; i++) { dst[i] = expf(src[i]); acc += dst[i]; } #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] /= acc; } } static inline void absdiff16s_c(int16_t *a, int16_t *b, int16_t *c, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { c[i] = abs(a[i] - b[i]); } } static inline void powerspect16s_c_interleaved(complex16s_t *src, int32_t *dst, int len) { #ifdef OMP #pragma omp simd #endif for (int i = 0; i < len; i++) { dst[i] = (int32_t)src[i].re * (int32_t)src[i].re + (int32_t)src[i].im * (int32_t)src[i].im; } } #ifdef __cplusplus } #endif
DRB031-truedepfirstdimension-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* There is a loop-carried true dependence within the outer level loop.
Data race pair: b[i][j]@66:7 vs. b[i-1][j-1]@66:15 */

#include <stdlib.h>
#include <stdio.h>

/* DataRaceBench kernel (DRB031, "-yes" variant): the data race documented
   above is INTENTIONAL -- this file exists to test race detectors.
   Do not "fix" the parallel loop. */
int main(int argc, char* argv[])
{
  int i,j;
  int n=1000, m=1000;
  double b[1000][1000]; /* ~7.6 MB on the stack -- requires a large stack limit */

  /* serial initialization */
  for (i=0; i<n; i++)
    for (j=0; j<m; j++)
      b[i][j] = 0.5;

  /* the outer loop carries a true dependence on the previous row
     (b[i-1][j-1]), so parallelizing over i creates the race */
#pragma omp parallel for private(j)
  for (i=1;i<n;i++)
    for (j=1;j<m;j++)
      b[i][j]=b[i-1][j-1];

  printf("b[500][500]=%f\n", b[500][500]);
  return 0;
}
ef_error.c
/* { dg-do compile } */ /* { dg-options "-fcilkplus -fopenmp-simd" } */ #pragma omp declare simd linear(y:1) simdlen(4) __attribute__((vector (linear (y:1), vectorlength(4)))) int func (int x, int y) { /* { dg-error "cannot be used in the same function marked as a Cilk Plus SIMD-enabled" } */ return (x+y); } __attribute__((vector (linear (y:1), private (x)))) /* { dg-error "is not valid for" } */ int func2 (int x, int y) { return (x+y); } __attribute__((vector (linear (y:1), simdlen (4)))) /* { dg-error "is not valid for" } */ int func2_1 (int x, int y) { return (x+y); } __attribute__((vector (linear (y:1), inbranch))) /* { dg-error "is not valid for" } */ int func2_3 (int x, int y) { return (x+y); } __attribute__((vector (notinbranch, vectorlength (4)))) /* { dg-error "is not valid for" } */ int func2_2 (int x, int y) { return (x+y); } int main (void) { return (func (5,6)); }
centr.h
namespace TSnap {

/////////////////////////////////////////////////
// Node centrality measures (See: http://en.wikipedia.org/wiki/Centrality)

/// Returns Degree centrality of a given node NId.
/// Degree centrality of a node is defined as its degree/(N-1), where N is the number of nodes in the network.
double GetDegreeCentr(const PUNGraph& Graph, const int& NId);

/// Returns Group Degree centrality of a given group of nodes.
/// Degree centrality of a node is defined as its degree/(N-1), where N is the number of nodes in the network.
//double GetGroupDegreeCentr(const PUNGraph& Graph, const PUNGraph& Group);
double GetGroupDegreeCentr(const PUNGraph& Graph, const TIntH& GroupNodes);

/// Returns Group Closeness centrality of a given group of nodes.
//double GetGroupDegreeCentr(const PUNGraph& Graph, const PUNGraph& Group);
double GetGroupClosenessCentr(const PUNGraph& Graph, const TIntH& GroupNodes);

/// Returns centrality Maximum k group.
TIntH MaxCPGreedyBetter(const PUNGraph& Graph, const int k);
/// Returns centrality Maximum k group (variant 1).
TIntH MaxCPGreedyBetter1(const PUNGraph& Graph, const int k);
/// Returns centrality Maximum k group (variant 2).
TIntH MaxCPGreedyBetter2(const PUNGraph& Graph, const int k);
/// Returns centrality Maximum k group (variant 3).
TIntH MaxCPGreedyBetter3(const PUNGraph& Graph, const int k);

/// Event importance
TIntFltH EventImportance(const PNGraph& Graph, const int k);

/// Intersect
int Intersect(TUNGraph::TNodeI Node, TIntH NNodes);
/// Intersect
int Intersect(TUNGraph::TNodeI Node, TStr NNodes);
/// Intersect
int Intersect(TUNGraph::TNodeI Node, int *NNodes, int NNodes_br);
// Intersect (variant)
int Intersect1(TUNGraph::TNodeI Node, TStr NNodes);
// Load nodes list
TIntH LoadNodeList(TStr InFNmNodes);

/// Returns Farness centrality of a given node NId.
/// Farness centrality of a node is the average shortest path length to all other nodes that reside in the same connected component as the given node.
template <class PGraph> double GetFarnessCentr(const PGraph& Graph, const int& NId, const bool& Normalized=true, const bool& IsDir=false);
template <class PGraph> double GetFarnessCentrMP(const PGraph& Graph, const int& NId, const bool& Normalized=true, const bool& IsDir=false);

/// Returns weighted Farness centrality of a given node \c NId.
/// Farness centrality of a node is the average shortest path length to all other nodes that reside in the same connected component as the given node.
double GetWeightedFarnessCentr(const PNEANet Graph, const int& NId, const TFltV& Attr, const bool& Normalized=true, const bool& IsDir=false);

/// Returns Closeness centrality of a given node NId.
/// Closeness centrality of a node is defined as 1/FarnessCentrality.
template <class PGraph> double GetClosenessCentr(const PGraph& Graph, const int& NId, const bool& Normalized=true, const bool& IsDir=false);
template <class PGraph> double GetClosenessCentrMP(const PGraph& Graph, const int& NId, const bool& Normalized=true, const bool& IsDir=false);

/// Returns weighted Closeness centrality of a given node \c NId.
/// Closeness centrality of a node is defined as 1/FarnessCentrality.
double GetWeightedClosenessCentr(const PNEANet Graph, const int& NId, const TFltV& Attr, const bool& Normalized=true, const bool& IsDir=false);

/// Returns node Eccentricity, the largest shortest-path distance from the node NId to any other node in the Graph.
/// @param IsDir false: ignore edge directions and consider edges as undirected (in case they are directed).
template <class PGraph> int GetNodeEcc(const PGraph& Graph, const int& NId, const bool& IsDir=false);

/// Computes (approximate) Node Betweenness Centrality based on a sample of NodeFrac nodes.
/// @param NIdBtwH hash table mapping node ids to their corresponding betweenness centrality values.
/// @param NodeFrac quality of approximation. NodeFrac=1.0 gives exact betweenness values.
template<class PGraph> void GetBetweennessCentr(const PGraph& Graph, TIntFltH& NIdBtwH, const double& NodeFrac=1.0, const bool& IsDir=false);

/// Computes (approximate) weighted Node Betweenness Centrality based on a sample of NodeFrac nodes.
/// @param NIdBtwH hash table mapping node ids to their corresponding betweenness centrality values.
/// @param NodeFrac quality of approximation. NodeFrac=1.0 gives exact betweenness values.
void GetWeightedBetweennessCentr(const PNEANet Graph, TIntFltH& NIdBtwH, const TFltV& Attr, const double& NodeFrac=1.0, const bool& IsDir=false);

/// Computes (approximate) Edge Betweenness Centrality based on a sample of NodeFrac nodes.
/// @param EdgeBtwH hash table mapping edges (pairs of node ids) to their corresponding betweenness centrality values.
/// @param NodeFrac quality of approximation. NodeFrac=1.0 gives exact betweenness values.
template<class PGraph> void GetBetweennessCentr(const PGraph& Graph, TIntPrFltH& EdgeBtwH, const double& NodeFrac=1.0, const bool& IsDir=false);

/// Computes (approximate) weighted Edge Betweenness Centrality based on a sample of NodeFrac nodes.
/// @param EdgeBtwH hash table mapping edges (pairs of node ids) to their corresponding betweenness centrality values.
/// @param NodeFrac quality of approximation. NodeFrac=1.0 gives exact betweenness values.
void GetWeightedBetweennessCentr(const PNEANet Graph, TIntPrFltH& EdgeBtwH, const TFltV& Attr, const double& NodeFrac=1.0, const bool& IsDir=false);

/// Computes (approximate) Node and Edge Betweenness Centrality based on a sample of NodeFrac nodes.
/// @param NIdBtwH hash table mapping node ids to their corresponding betweenness centrality values.
/// @param EdgeBtwH hash table mapping edges (pairs of node ids) to their corresponding betweenness centrality values.
/// @param NodeFrac quality of approximation. NodeFrac=1.0 gives exact betweenness values.
template<class PGraph> void GetBetweennessCentr(const PGraph& Graph, TIntFltH& NIdBtwH, TIntPrFltH& EdgeBtwH, const double& NodeFrac=1.0, const bool& IsDir=false);

/// Computes (approximate) weighted Node and Edge Betweenness Centrality based on a sample of NodeFrac nodes.
/// @param NIdBtwH hash table mapping node ids to their corresponding betweenness centrality values.
/// @param EdgeBtwH hash table mapping edges (pairs of node ids) to their corresponding betweenness centrality values.
/// @param NodeFrac quality of approximation. NodeFrac=1.0 gives exact betweenness values.
void GetWeightedBetweennessCentr(const PNEANet Graph, TIntFltH& NIdBtwH, TIntPrFltH& EdgeBtwH, const TFltV& Attr, const double& NodeFrac=1.0, const bool& IsDir=false);

/// Computes (approximate) Betweenness Centrality of all nodes and all edges of the network.
/// To obtain exact betweenness values one needs to solve single-source shortest-path problem for every node.
/// To speed up the algorithm we solve the shortest-path problem for the BtwNIdV subset of nodes. This gives centrality values that are about Graph->GetNodes()/BtwNIdV.Len() times lower than the exact betweenness centrality values.
/// See "A Faster Algorithm for Betweenness Centrality", Ulrik Brandes, Journal of Mathematical Sociology, 2001, and
/// "Centrality Estimation in Large Networks", Ulrik Brandes and Christian Pich, 2006 for more details.
template<class PGraph> void GetBetweennessCentr(const PGraph& Graph, const TIntV& BtwNIdV, TIntFltH& NodeBtwH, const bool& DoNodeCent, TIntPrFltH& EdgeBtwH, const bool& DoEdgeCent, const bool& IsDir);

/// Computes (approximate) weighted Betweenness Centrality of all nodes and all edges of the network.
void GetWeightedBetweennessCentr(const PNEANet Graph, const TIntV& BtwNIdV, TIntFltH& NodeBtwH, const bool& DoNodeCent, TIntPrFltH& EdgeBtwH, const bool& DoEdgeCent, const TFltV& Attr, const bool& IsDir);

/// Computes Eigenvector Centrality of all nodes in the network
/// Eigenvector Centrality of a node N is defined recursively as the average of centrality values of N's neighbors in the network.
void GetEigenVectorCentr(const PUNGraph& Graph, TIntFltH& NIdEigenH, const double& Eps=1e-4, const int& MaxIter=100);

/// PageRank
/// For more info see: http://en.wikipedia.org/wiki/PageRank
template<class PGraph> void GetPageRank(const PGraph& Graph, TIntFltH& PRankH, const double& C=0.85, const double& Eps=1e-4, const int& MaxIter=100);
template<class PGraph> void GetPageRank_v1(const PGraph& Graph, TIntFltH& PRankH, const double& C=0.85, const double& Eps=1e-4, const int& MaxIter=100);
#ifdef USE_OPENMP
template<class PGraph> void GetPageRankMP(const PGraph& Graph, TIntFltH& PRankH, const double& C=0.85, const double& Eps=1e-4, const int& MaxIter=100);
#endif

/// Weighted PageRank (TODO: Use template)
int GetWeightedPageRank(const PNEANet Graph, TIntFltH& PRankH, const TStr& Attr, const double& C=0.85, const double& Eps=1e-4, const int& MaxIter=100);
#ifdef USE_OPENMP
int GetWeightedPageRankMP(const PNEANet Graph, TIntFltH& PRankH, const TStr& Attr, const double& C=0.85, const double& Eps=1e-4, const int& MaxIter=100);
#endif

/// HITS: Hubs and Authorities
/// For more info see: http://en.wikipedia.org/wiki/HITS_algorithm)
template<class PGraph> void GetHits(const PGraph& Graph, TIntFltH& NIdHubH, TIntFltH& NIdAuthH, const int& MaxIter=20);
#ifdef USE_OPENMP
template<class PGraph> void GetHitsMP(const PGraph& Graph, TIntFltH& NIdHubH, TIntFltH& NIdAuthH, const int& MaxIter=20);
#endif

/// Dijkstra Algorithm
/// For more info see: https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
int GetWeightedShortestPath(const PNEANet Graph, const int& SrcNId, TIntFltH& NIdDistH,
const TFltV& Attr); ///////////////////////////////////////////////// // Implementation template <class PGraph> double GetFarnessCentr(const PGraph& Graph, const int& NId, const bool& Normalized, const bool& IsDir) { TIntH NDistH(Graph->GetNodes()); TSnap::GetShortPath<PGraph>(Graph, NId, NDistH, IsDir, TInt::Mx); double sum = 0; for (TIntH::TIter I = NDistH.BegI(); I < NDistH.EndI(); I++) { sum += I->Dat(); } if (NDistH.Len() > 1) { double centr = sum/double(NDistH.Len()-1); if (Normalized) { centr *= (Graph->GetNodes() - 1)/double(NDistH.Len()-1); } return centr; } else { return 0.0; } } template <class PGraph> double GetFarnessCentrMP(const PGraph& Graph, const int& NId, const bool& Normalized, const bool& IsDir) { TIntH NDistH(Graph->GetNodes()); TSnap::GetShortPath<PGraph>(Graph, NId, NDistH, IsDir, TInt::Mx); double sum = 0; for (TIntH::TIter I = NDistH.BegI(); I < NDistH.EndI(); I++) { sum += I->Dat(); } if (NDistH.Len() > 1) { double centr = sum/double(NDistH.Len()-1); if (Normalized) { centr *= (Graph->GetNodes() - 1)/double(NDistH.Len()-1); } return centr; } else { return 0.0; } } template <class PGraph> double GetClosenessCentr(const PGraph& Graph, const int& NId, const bool& Normalized, const bool& IsDir) { const double Farness = GetFarnessCentr<PGraph> (Graph, NId, Normalized, IsDir); if (Farness != 0.0) { return 1.0/Farness; } else { return 0.0; } return 0.0; } template <class PGraph> double GetClosenessCentrMP(const PGraph& Graph, const int& NId, const bool& Normalized, const bool& IsDir) { const double Farness = GetFarnessCentrMP<PGraph> (Graph, NId, Normalized, IsDir); if (Farness != 0.0) { return 1.0/Farness; } else { return 0.0; } return 0.0; } template <class PGraph> int GetNodeEcc(const PGraph& Graph, const int& NId, const bool& IsDir) { int NodeEcc; int Dist; TBreathFS<PGraph> BFS(Graph); // get shortest paths to all the nodes BFS.DoBfs(NId, true, ! 
IsDir, -1, TInt::Mx); NodeEcc = 0; // find the largest value for (int i = 0; i < BFS.NIdDistH.Len(); i++) { Dist = BFS.NIdDistH[i]; if (Dist > NodeEcc) { NodeEcc = Dist; } } return NodeEcc; } // Page Rank -- there are two different implementations (uncomment the desired 2 lines): // Berkhin -- (the correct way) see Algorithm 1 of P. Berkhin, A Survey on PageRank Computing, Internet Mathematics, 2005 // iGraph -- iGraph implementation(which treats leaked PageRank in a funny way) // This implementation is an unoptimized version, it accesses nodes via a hash table. template<class PGraph> void GetPageRank_v1(const PGraph& Graph, TIntFltH& PRankH, const double& C, const double& Eps, const int& MaxIter) { const int NNodes = Graph->GetNodes(); //const double OneOver = 1.0/double(NNodes); PRankH.Gen(NNodes); for (typename PGraph::TObj::TNodeI NI = Graph->BegNI(); NI < Graph->EndNI(); NI++) { PRankH.AddDat(NI.GetId(), 1.0/NNodes); //IAssert(NI.GetId() == PRankH.GetKey(PRankH.Len()-1)); } TFltV TmpV(NNodes); for (int iter = 0; iter < MaxIter; iter++) { int j = 0; for (typename PGraph::TObj::TNodeI NI = Graph->BegNI(); NI < Graph->EndNI(); NI++, j++) { TmpV[j] = 0; for (int e = 0; e < NI.GetInDeg(); e++) { const int InNId = NI.GetInNId(e); const int OutDeg = Graph->GetNI(InNId).GetOutDeg(); if (OutDeg > 0) { TmpV[j] += PRankH.GetDat(InNId) / OutDeg; } } TmpV[j] = C*TmpV[j]; // Berkhin (the correct way of doing it) //TmpV[j] = C*TmpV[j] + (1.0-C)*OneOver; // iGraph } double diff=0, sum=0, NewVal; for (int i = 0; i < TmpV.Len(); i++) { sum += TmpV[i]; } const double Leaked = (1.0-sum) / double(NNodes); for (int i = 0; i < PRankH.Len(); i++) { // re-instert leaked PageRank NewVal = TmpV[i] + Leaked; // Berkhin //NewVal = TmpV[i] / sum; // iGraph diff += fabs(NewVal-PRankH[i]); PRankH[i] = NewVal; } if (diff < Eps) { break; } } } // Page Rank -- there are two different implementations (uncomment the desired 2 lines): // Berkhin -- (the correct way) see Algorithm 1 of P. 
Berkhin, A Survey on PageRank Computing, Internet Mathematics, 2005 // iGraph -- iGraph implementation(which treats leaked PageRank in a funny way) // This implementation is an optimized version, it builds a vector and accesses nodes via the vector. template<class PGraph> void GetPageRank(const PGraph& Graph, TIntFltH& PRankH, const double& C, const double& Eps, const int& MaxIter) { const int NNodes = Graph->GetNodes(); TVec<typename PGraph::TObj::TNodeI> NV; PRankH.Gen(NNodes); int MxId = -1; for (typename PGraph::TObj::TNodeI NI = Graph->BegNI(); NI < Graph->EndNI(); NI++) { NV.Add(NI); PRankH.AddDat(NI.GetId(), 1.0/NNodes); int Id = NI.GetId(); if (Id > MxId) { MxId = Id; } } TFltV PRankV(MxId+1); TIntV OutDegV(MxId+1); for (int j = 0; j < NNodes; j++) { typename PGraph::TObj::TNodeI NI = NV[j]; int Id = NI.GetId(); PRankV[Id] = 1.0/NNodes; OutDegV[Id] = NI.GetOutDeg(); } TFltV TmpV(NNodes); for (int iter = 0; iter < MaxIter; iter++) { for (int j = 0; j < NNodes; j++) { typename PGraph::TObj::TNodeI NI = NV[j]; TFlt Tmp = 0; for (int e = 0; e < NI.GetInDeg(); e++) { const int InNId = NI.GetInNId(e); const int OutDeg = OutDegV[InNId]; if (OutDeg > 0) { Tmp += PRankV[InNId] / OutDeg; } } TmpV[j] = C*Tmp; // Berkhin (the correct way of doing it) } double sum = 0; for (int i = 0; i < TmpV.Len(); i++) { sum += TmpV[i]; } const double Leaked = (1.0-sum) / double(NNodes); double diff = 0; for (int i = 0; i < NNodes; i++) { typename PGraph::TObj::TNodeI NI = NV[i]; double NewVal = TmpV[i] + Leaked; // Berkhin int Id = NI.GetId(); diff += fabs(NewVal-PRankV[Id]); PRankV[Id] = NewVal; } if (diff < Eps) { break; } } for (int i = 0; i < NNodes; i++) { typename PGraph::TObj::TNodeI NI = NV[i]; PRankH[i] = PRankV[NI.GetId()]; } } #ifdef USE_OPENMP // Page Rank -- there are two different implementations (uncomment the desired 2 lines): // Berkhin -- (the correct way) see Algorithm 1 of P. 
Berkhin, A Survey on PageRank Computing, Internet Mathematics, 2005 // iGraph -- iGraph implementation(which treats leaked PageRank in a funny way) // This is a parallel, optimized version. template<class PGraph> void GetPageRankMP(const PGraph& Graph, TIntFltH& PRankH, const double& C, const double& Eps, const int& MaxIter) { const int NNodes = Graph->GetNodes(); TVec<typename PGraph::TObj::TNodeI> NV; PRankH.Gen(NNodes); int MxId = -1; for (typename PGraph::TObj::TNodeI NI = Graph->BegNI(); NI < Graph->EndNI(); NI++) { NV.Add(NI); PRankH.AddDat(NI.GetId(), 1.0/NNodes); int Id = NI.GetId(); if (Id > MxId) { MxId = Id; } } TFltV PRankV(MxId+1); TIntV OutDegV(MxId+1); #pragma omp parallel for schedule(dynamic,10000) for (int j = 0; j < NNodes; j++) { typename PGraph::TObj::TNodeI NI = NV[j]; int Id = NI.GetId(); PRankV[Id] = 1.0/NNodes; OutDegV[Id] = NI.GetOutDeg(); } TFltV TmpV(NNodes); for (int iter = 0; iter < MaxIter; iter++) { #pragma omp parallel for schedule(dynamic,10000) for (int j = 0; j < NNodes; j++) { typename PGraph::TObj::TNodeI NI = NV[j]; TFlt Tmp = 0; for (int e = 0; e < NI.GetInDeg(); e++) { const int InNId = NI.GetInNId(e); const int OutDeg = OutDegV[InNId]; if (OutDeg > 0) { Tmp += PRankV[InNId] / OutDeg; } } TmpV[j] = C*Tmp; // Berkhin (the correct way of doing it) } double sum = 0; #pragma omp parallel for reduction(+:sum) schedule(dynamic,10000) for (int i = 0; i < TmpV.Len(); i++) { sum += TmpV[i]; } const double Leaked = (1.0-sum) / double(NNodes); double diff = 0; #pragma omp parallel for reduction(+:diff) schedule(dynamic,10000) for (int i = 0; i < NNodes; i++) { double NewVal = TmpV[i] + Leaked; // Berkhin int Id = NV[i].GetId(); diff += fabs(NewVal-PRankV[Id]); PRankV[Id] = NewVal; } if (diff < Eps) { break; } } #pragma omp parallel for schedule(dynamic,10000) for (int i = 0; i < NNodes; i++) { typename PGraph::TObj::TNodeI NI = NV[i]; PRankH[i] = PRankV[NI.GetId()]; } } #endif // USE_OPENMP // Betweenness Centrality template<class 
PGraph> void GetBetweennessCentr(const PGraph& Graph, const TIntV& BtwNIdV, TIntFltH& NodeBtwH, const bool& DoNodeCent, TIntPrFltH& EdgeBtwH, const bool& DoEdgeCent, const bool& IsDir) { if (DoNodeCent) { NodeBtwH.Clr(); } if (DoEdgeCent) { EdgeBtwH.Clr(); } const int nodes = Graph->GetNodes(); TIntS S(nodes); TIntQ Q(nodes); TIntIntVH P(nodes); // one vector for every node TIntFltH delta(nodes); TIntH sigma(nodes), d(nodes); // init for (typename PGraph::TObj::TNodeI NI = Graph->BegNI(); NI < Graph->EndNI(); NI++) { if (DoNodeCent) { NodeBtwH.AddDat(NI.GetId(), 0); } if (DoEdgeCent) { for (int e = 0; e < NI.GetOutDeg(); e++) { if (Graph->HasFlag(gfDirected) && IsDir) { // add all outgoing edges for directed graphs EdgeBtwH.AddDat(TIntPr(NI.GetId(), NI.GetOutNId(e)), 0); } else { // add each edge only once in undirected graphs if (NI.GetId() < NI.GetOutNId(e)) { EdgeBtwH.AddDat(TIntPr(NI.GetId(), NI.GetOutNId(e)), 0); } } } // add incoming edges in directed graphs that were not added yet if (Graph->HasFlag(gfDirected) && !IsDir) { for (int e = 0; e < NI.GetInDeg(); e++) { if (NI.GetId() < NI.GetInNId(e) && !Graph->IsEdge(NI.GetId(), NI.GetInNId(e))) { EdgeBtwH.AddDat(TIntPr(NI.GetId(), NI.GetInNId(e)), 0); } } } } sigma.AddDat(NI.GetId(), 0); d.AddDat(NI.GetId(), -1); P.AddDat(NI.GetId(), TIntV()); delta.AddDat(NI.GetId(), 0); } // calc betweeness for (int k=0; k < BtwNIdV.Len(); k++) { const typename PGraph::TObj::TNodeI NI = Graph->GetNI(BtwNIdV[k]); // reset for (int i = 0; i < sigma.Len(); i++) { sigma[i]=0; d[i]=-1; delta[i]=0; P[i].Clr(false); } S.Clr(false); Q.Clr(false); sigma.AddDat(NI.GetId(), 1); d.AddDat(NI.GetId(), 0); Q.Push(NI.GetId()); while (! 
Q.Empty()) { const int v = Q.Top(); Q.Pop(); const typename PGraph::TObj::TNodeI NI2 = Graph->GetNI(v); S.Push(v); const int VDat = d.GetDat(v); // iterate over all outgoing edges for (int e = 0; e < NI2.GetOutDeg(); e++) { const int w = NI2.GetOutNId(e); if (d.GetDat(w) < 0) { // find w for the first time Q.Push(w); d.AddDat(w, VDat+1); } //shortest path to w via v ? if (d.GetDat(w) == VDat+1) { sigma.AddDat(w) += sigma.GetDat(v); P.GetDat(w).Add(v); } } // if ignoring direction in directed networks, iterate over incoming edges if (Graph->HasFlag(gfDirected) && !IsDir) { for (int e = 0; e < NI2.GetInDeg(); e++) { const int w = NI2.GetInNId(e); // skip neighbors that are also outgoing if (Graph->IsEdge(NI2.GetId(), w)) { continue; } if (d.GetDat(w) < 0) { // find w for the first time Q.Push(w); d.AddDat(w, VDat+1); } //shortest path to w via v ? if (d.GetDat(w) == VDat+1) { sigma.AddDat(w) += sigma.GetDat(v); P.GetDat(w).Add(v); } } } } while (! S.Empty()) { const int w = S.Top(); const double SigmaW = sigma.GetDat(w); const double DeltaW = delta.GetDat(w); const TIntV NIdV = P.GetDat(w); S.Pop(); for (int i = 0; i < NIdV.Len(); i++) { const int NId = NIdV[i]; const double c = (sigma.GetDat(NId)*1.0/SigmaW) * (1+DeltaW); delta.AddDat(NId) += c; if (DoEdgeCent) { if (Graph->HasFlag(gfDirected) && IsDir) { EdgeBtwH.AddDat(TIntPr(NId, w)) += c; } else { EdgeBtwH.AddDat(TIntPr(TMath::Mn(NId, w), TMath::Mx(NId, w))) += c; } } } if (DoNodeCent && w != NI.GetId()) { NodeBtwH.AddDat(w) += delta.GetDat(w)/2.0; } } } } template<class PGraph> void GetBetweennessCentr(const PGraph& Graph, TIntFltH& NodeBtwH, const double& NodeFrac, const bool& IsDir) { TIntPrFltH EdgeBtwH; TIntV NIdV; Graph->GetNIdV(NIdV); if (NodeFrac < 1.0) { // calculate beetweenness centrality for a subset of nodes NIdV.Shuffle(TInt::Rnd); for (int i = int((1.0-NodeFrac)*NIdV.Len()); i > 0; i--) { NIdV.DelLast(); } } GetBetweennessCentr<PGraph> (Graph, NIdV, NodeBtwH, true, EdgeBtwH, false, IsDir); } 
template<class PGraph> void GetBetweennessCentr(const PGraph& Graph, TIntPrFltH& EdgeBtwH, const double& NodeFrac, const bool& IsDir) { TIntFltH NodeBtwH; TIntV NIdV; Graph->GetNIdV(NIdV); if (NodeFrac < 1.0) { // calculate beetweenness centrality for a subset of nodes NIdV.Shuffle(TInt::Rnd); for (int i = int((1.0-NodeFrac)*NIdV.Len()); i > 0; i--) { NIdV.DelLast(); } } GetBetweennessCentr<PGraph> (Graph, NIdV, NodeBtwH, false, EdgeBtwH, true, IsDir); } template<class PGraph> void GetBetweennessCentr(const PGraph& Graph, TIntFltH& NodeBtwH, TIntPrFltH& EdgeBtwH, const double& NodeFrac, const bool& IsDir) { TIntV NIdV; Graph->GetNIdV(NIdV); if (NodeFrac < 1.0) { // calculate beetweenness centrality for a subset of nodes NIdV.Shuffle(TInt::Rnd); for (int i = int((1.0-NodeFrac)*NIdV.Len()); i > 0; i--) { NIdV.DelLast(); } } GetBetweennessCentr<PGraph> (Graph, NIdV, NodeBtwH, true, EdgeBtwH, true, IsDir); } template<class PGraph> void GetHits(const PGraph& Graph, TIntFltH& NIdHubH, TIntFltH& NIdAuthH, const int& MaxIter) { const int NNodes = Graph->GetNodes(); NIdHubH.Gen(NNodes); NIdAuthH.Gen(NNodes); for (typename PGraph::TObj::TNodeI NI = Graph->BegNI(); NI < Graph->EndNI(); NI++) { NIdHubH.AddDat(NI.GetId(), 1.0); NIdAuthH.AddDat(NI.GetId(), 1.0); } double Norm=0; for (int iter = 0; iter < MaxIter; iter++) { // update authority scores Norm = 0; for (typename PGraph::TObj::TNodeI NI = Graph->BegNI(); NI < Graph->EndNI(); NI++) { double& Auth = NIdAuthH.GetDat(NI.GetId()).Val; Auth = 0; for (int e = 0; e < NI.GetInDeg(); e++) { Auth += NIdHubH.GetDat(NI.GetInNId(e)); } Norm += Auth*Auth; } Norm = sqrt(Norm); for (int i = 0; i < NIdAuthH.Len(); i++) { NIdAuthH[i] /= Norm; } // update hub scores for (typename PGraph::TObj::TNodeI NI = Graph->BegNI(); NI < Graph->EndNI(); NI++) { double& Hub = NIdHubH.GetDat(NI.GetId()).Val; Hub = 0; for (int e = 0; e < NI.GetOutDeg(); e++) { Hub += NIdAuthH.GetDat(NI.GetOutNId(e)); } Norm += Hub*Hub; } Norm = sqrt(Norm); for (int i = 
0; i < NIdHubH.Len(); i++) { NIdHubH[i] /= Norm; } } // make sure Hub and Authority scores normalize to L2 norm 1 Norm = 0.0; for (int i = 0; i < NIdHubH.Len(); i++) { Norm += TMath::Sqr(NIdHubH[i]); } Norm = sqrt(Norm); for (int i = 0; i < NIdHubH.Len(); i++) { NIdHubH[i] /= Norm; } Norm = 0.0; for (int i = 0; i < NIdAuthH.Len(); i++) { Norm += TMath::Sqr(NIdAuthH[i]); } Norm = sqrt(Norm); for (int i = 0; i < NIdAuthH.Len(); i++) { NIdAuthH[i] /= Norm; } } #ifdef USE_OPENMP template<class PGraph> void GetHitsMP(const PGraph& Graph, TIntFltH& NIdHubH, TIntFltH& NIdAuthH, const int& MaxIter) { const int NNodes = Graph->GetNodes(); TIntV NV; NIdHubH.Gen(NNodes); NIdAuthH.Gen(NNodes); for (typename PGraph::TObj::TNodeI NI = Graph->BegNI(); NI < Graph->EndNI(); NI++) { NV.Add(NI.GetId()); NIdHubH.AddDat(NI.GetId(), 1.0); NIdAuthH.AddDat(NI.GetId(), 1.0); } double Norm=0; for (int iter = 0; iter < MaxIter; iter++) { // update authority scores Norm = 0; #pragma omp parallel for reduction(+:Norm) schedule(dynamic,1000) for (int i = 0; i < NNodes; i++) { typename PGraph::TObj::TNodeI NI = Graph->GetNI(NV[i]); double& Auth = NIdAuthH.GetDat(NI.GetId()).Val; Auth = 0; for (int e = 0; e < NI.GetInDeg(); e++) { Auth += NIdHubH.GetDat(NI.GetInNId(e)); } Norm = Norm + Auth*Auth; } Norm = sqrt(Norm); for (int i = 0; i < NIdAuthH.Len(); i++) { NIdAuthH[i] /= Norm; } // update hub scores #pragma omp parallel for reduction(+:Norm) schedule(dynamic,1000) for (int i = 0; i < NNodes; i++) { typename PGraph::TObj::TNodeI NI = Graph->GetNI(NV[i]); double& Hub = NIdHubH.GetDat(NI.GetId()).Val; Hub = 0; for (int e = 0; e < NI.GetOutDeg(); e++) { Hub += NIdAuthH.GetDat(NI.GetOutNId(e)); } Norm = Norm + Hub*Hub; } Norm = sqrt(Norm); for (int i = 0; i < NIdHubH.Len(); i++) { NIdHubH[i] /= Norm; } } // make sure Hub and Authority scores normalize to L2 norm 1 Norm = 0.0; for (int i = 0; i < NIdHubH.Len(); i++) { Norm += TMath::Sqr(NIdHubH[i]); } Norm = sqrt(Norm); for (int i = 0; i < 
NIdHubH.Len(); i++) { NIdHubH[i] /= Norm; } Norm = 0.0; for (int i = 0; i < NIdAuthH.Len(); i++) { Norm += TMath::Sqr(NIdAuthH[i]); } Norm = sqrt(Norm); for (int i = 0; i < NIdAuthH.Len(); i++) { NIdAuthH[i] /= Norm; } } #endif /// Gets sequence of PageRank tables from given \c GraphSeq into \c TableSeq. template <class PGraph> void MapPageRank(const TVec<PGraph>& GraphSeq, TVec<PTable>& TableSeq, TTableContext* Context, const double& C, const double& Eps, const int& MaxIter) { int NumGraphs = GraphSeq.Len(); TableSeq.Reserve(NumGraphs, NumGraphs); // This loop is parallelizable. for (TInt i = 0; i < NumGraphs; i++) { TIntFltH PRankH; GetPageRank(GraphSeq[i], PRankH, C, Eps, MaxIter); TableSeq[i] = TTable::TableFromHashMap(PRankH, "NodeId", "PageRank", Context, false); } } /// Gets sequence of Hits tables from given \c GraphSeq into \c TableSeq. template <class PGraph> void MapHits(const TVec<PGraph>& GraphSeq, TVec<PTable>& TableSeq, TTableContext* Context, const int& MaxIter) { int NumGraphs = GraphSeq.Len(); TableSeq.Reserve(NumGraphs, NumGraphs); // This loop is parallelizable. for (TInt i = 0; i < NumGraphs; i++) { TIntFltH HubH; TIntFltH AuthH; GetHits(GraphSeq[i], HubH, AuthH, MaxIter); PTable HubT = TTable::TableFromHashMap(HubH, "NodeId", "Hub", Context, false); PTable AuthT = TTable::TableFromHashMap(AuthH, "NodeId", "Authority", Context, false); PTable HitsT = HubT->Join("NodeId", AuthT, "NodeId"); HitsT->Rename("1.NodeId", "NodeId"); HitsT->Rename("1.Hub", "Hub"); HitsT->Rename("2.Authority", "Authority"); TStrV V = TStrV(3, 0); V.Add("NodeId"); V.Add("Hub"); V.Add("Authority"); HitsT->ProjectInPlace(V); TableSeq[i] = HitsT; } } }; // namespace TSnap
appTwoFor_omp-lols.c
/* -- Libraries for C/C++ -- */ #include <stdlib.h> #include <stdio.h> #include <math.h> #include <time.h> #include <iostream> using namespace std; /* -- library for parallelization of code -- */ // #include <pthread.h> #ifdef MPI #include <mpi.h> #endif #ifdef _OPENMP #include <omp.h> #endif /* variables to support parallelization of code */ #define DEFAULT_NUMTHREADS 16 // default constant and set value for the number of threads int numThreads; /* -- Application specific #defines and variables -- */ int numOuterIters; int probSize; #define DEFAULT_NUMOUTERITERS 1 #define DEFAULT_PROBSIZE 500 // Default values based on architecture. For a proper test, the problem size ought to be such that data goes out of cache. #define MAX_NUMITERS 100000000 #define MAX_PROBSIZE 16384 // Default values based on architecture. For a proper test, the problem size ought to be such that data goes out of cache. /* -- Debugging -- */ #define VERBOSE 1 /* -- Performance Measurement -- */ double totalTime = 0.0; FILE* myfile;// output file for experimental data /* -- Hardware Profiling -- */ // #define USE_PAPI #ifdef USE_PAPI #include <papi.h> // Leave PAPI out for now to make fully portable. Some platforms don't have the L2 and L3 cache misses available. TODO: need a way to check the counters in config or programmatically. 
#endif /* --Library for scheduling strategy and variables and macros associated with the library -- */ #include "vSched.h" // in the below macros, strat is how we specify the library #define FORALL_BEGIN(strat, s,e, start, end, tid, numThds ) loop_start_ ## strat (s,e ,&start, &end, tid, numThds); do { #define FORALL_END(strat, start, end, tid) } while( loop_next_ ## strat (&start, &end, tid)); int main (int argc, char** argv ); int i4_min ( int i1, int i2 ); void timestamp ( ); /******************************************************************************/ int main (int argc, char** argv ) /******************************************************************************/ /* Purpose MAIN is the main program for MANDELBROT_OPENMP. Discussion: MANDELBROT_OPENMP computes an image of the Mandelbrot set. Licensing: This code is distributed under the GNU LGPL license. Original Modified: 03 September 2012 Revision Modified: 19 April 2020 Author: John Burkardt Revision Author: Vivek Kale Purpose: The revision author uses the original code to demonstrate and test the use low-overhead loop scheduling strategy through library vSched Local Parameters: Local, int COUNT_MAX, the maximum number of iterations taken for a particular pixel. 
*/ { int m = 500; int n = 500; int b[m][n]; int c; int count[m][n]; int count_max = 2000; int g[m][n]; int i; int j; int jhi; int jlo; int k; char const *output_filename = "mandelbrot_openmp.ppm"; // use const to allow for c++ string conversion FILE *output_unit; int r[m][n]; double wtime; double x_max = 1.25; double x_min = - 2.25; double x; double x1; double x2; double y_max = 1.75; double y_min = - 1.75; double y; double y1; double y2; #ifdef MPI MPI_Init(&argc, &argv); #endif //for vSched int numThreads = DEFAULT_NUMTHREADS; int threadNum; double static_fraction = 0.5; double constraint = 0.1; int chunk_size = 1; if(argc <= 2) // if user fails to put in minimum args, which are for application domain specific for this test { // char userReplyDefault; cout << "Usage: testAppTwo_omp_{loopSched} [probSize] [numOuterIters] [chunk_size] <static_fraction> <constraint>" << endl; cout << "Usage(cont'd): where {loopSched} is the implementation strategy or library you use, e.g., vSched's low-overhead scheduling, OpenMP rtl's low-overhead scheduling" << endl; // cout << "Use defaults? [y/N]" << endl ; // cin << varName; cout << "continuing with default problem size and app parameters." 
<< endl; probSize = DEFAULT_PROBSIZE; numOuterIters = DEFAULT_NUMOUTERITERS; } // else // required args else { probSize = atoi(argv[1]); numOuterIters = atoi(argv[2]); } if (argc > 3) chunk_size = atoi(argv[3]); if (argc > 4) static_fraction = atof(argv[4]); if (argc > 5) constraint = atof(argv[5]); // set number of threads to input //omp_set_num_threads(numThreads); #pragma omp parallel { #pragma omp master cout << "Number of threads is : " << omp_get_num_threads() << endl; #ifdef USE_VSCHED numThreads = omp_get_num_threads(); #endif } #ifdef USE_VSCHED vSched_init(numThreads); #endif timestamp ( ); printf ( "\n" ); printf ( "MANDELBROT_OPENMP\n" ); printf ( " C/OpenMP version\n" ); printf ( "\n" ); printf ( " Create an ASCII PPM image of the Mandelbrot set.\n" ); printf ( "\n" ); printf ( " For each point C = X + i*Y\n" ); printf ( " with X range [%g,%g]\n", x_min, x_max ); printf ( " and Y range [%g,%g]\n", y_min, y_max ); printf ( " carry out %d iterations of the map\n", count_max ); printf ( " Z(n+1) = Z(n)^2 + C.\n" ); printf ( " If the iterates stay bounded (norm less than 2)\n" ); printf ( " then C is taken to be a member of the set.\n" ); printf ( "\n" ); printf ( " An ASCII PPM image of the set is created using\n" ); printf ( " M = %d pixels in the X direction and\n", m ); printf ( " N = %d pixels in the Y direction.\n", n ); omp_sched_t schedule; int chunksize = chunk_size; omp_get_schedule(&schedule, &chunksize); printf ( "OpenMP schedule: OMP_SCHEDULE=%d\t%d\n", (int) schedule, chunksize); wtime = omp_get_wtime ( ); /* Carry out the iteration for each pixel, determining COUNT. 
*/ for (int iter = 0; iter < numOuterIters; iter++) { # pragma omp parallel \ shared ( b, count, count_max, g, r, x_max, x_min, y_max, y_min ) \ private ( i, j, k, x, x1, x2, y, y1, y2 ) { // # pragma omp for //TODO: figure out how to better template the scheduling strategy name in the below lines of code for both library implmentations // # pragma omp for schedule(user:statdynstaggered, &lr) collapse(2) // prototype UDS placeholder // The first parameter is the loop scheduling strategy. threadNum = omp_get_thread_num(); numThreads = omp_get_num_threads(); #ifdef USE_VSCHED int startInd; int endInd; // chunk start and end indices for vSched runtime to set and retrieve setCDY(static_fraction, constraint, chunk_size); // set parameter of scheduling strategy for vSched probSize = m; // set to loop bound of loop that gets parallelized by OpenMP FORALL_BEGIN(statdynstaggered, 0, probSize, startInd, endInd, threadNum, numThreads) #ifdef VERBOSE if(VERBOSE==1) printf("Thread [%d] : iter = %d \t startInd = %d \t endInd = %d \t\n", threadNum,iter, startInd, endInd); #endif for (i = startInd ; i < endInd ; i++) #else #ifdef VERBOSE if(VERBOSE==1) printf("Thread [%d] : iter = %d executing a chunk \n", threadNum,iter); #endif #pragma omp for schedule(guided, chunk_size) { // add parens to show comparison with forall macro for ( i = 0; i < m; i++ ) #endif { y = ( ( double ) ( i - 1 ) * y_max + ( double ) ( m - i ) * y_min ) / ( double ) ( m - 1 ); for ( j = 0; j < n; j++ ) { x = ( ( double ) ( j - 1 ) * x_max + ( double ) ( n - j ) * x_min ) / ( double ) ( n - 1 ); count[i][j] = 0; x1 = x; y1 = y; for ( k = 1; k <= count_max; k++ ) { x2 = x1 * x1 - y1 * y1 + x; y2 = 2 * x1 * y1 + y; if ( x2 < -2.0 || 2.0 < x2 || y2 < -2.0 || 2.0 < y2 ) { count[i][j] = k; break; } x1 = x2; y1 = y2; } if ( ( count[i][j] % 2 ) == 1 ) { r[i][j] = 255; g[i][j] = 255; b[i][j] = 255; } else { c = ( int ) ( 255.0 * sqrt ( sqrt ( sqrt ( ( ( double ) ( count[i][j] ) / ( double ) ( count_max ) ) ) ) ) ); 
r[i][j] = 3 * c / 5; g[i][j] = 3 * c / 5; b[i][j] = c; } } } #ifdef USE_VSCHED FORALL_END(statdynstaggered, startInd, endInd, threadNum) #else } //End parallelized for block #endif } // end parallel } // end outer iter loop wtime = omp_get_wtime ( ) - wtime; printf ( "\n" ); printf ( " Time = %g seconds.\n", wtime ); /* Write data to an ASCII PPM file. */ output_unit = fopen ( output_filename, "wt" ); fprintf ( output_unit, "P3\n" ); fprintf ( output_unit, "%d %d\n", n, m ); fprintf ( output_unit, "%d\n", 255 ); for ( i = 0; i < m; i++ ) { for ( jlo = 0; jlo < n; jlo = jlo + 4 ) { jhi = i4_min ( jlo + 4, n ); for ( j = jlo; j < jhi; j++ ) { fprintf ( output_unit, " %d %d %d", r[i][j], g[i][j], b[i][j] ); } fprintf ( output_unit, "\n" ); } } fclose ( output_unit ); printf ( "\n" ); printf ( " Graphics data written to \"%s\".\n", output_filename ); /* Terminate. */ printf ( "\n" ); printf ( "MANDELBROT_OPENMP\n" ); printf ( " Normal end of execution.\n" ); printf ( "\n" ); timestamp ( ); #ifdef USE_VSCHED vSched_finalize(numThreads); #endif #ifdef MPI MPI_Finalize(MPI_COMM_WORLD); #endif return 0; } /******************************************************************************/ int i4_min ( int i1, int i2 ) /******************************************************************************/ /* Purpose: I4_MIN returns the smaller of two I4's. Licensing: This code is distributed under the GNU LGPL license. Modified: 29 August 2006 Author: John Burkardt Parameters: Input, int I1, I2, two integers to be compared. Output, int I4_MIN, the smaller of I1 and I2. */ { int value; if ( i1 < i2 ) { value = i1; } else { value = i2; } return value; } /******************************************************************************/ void timestamp ( ) /******************************************************************************/ /* Purpose: TIMESTAMP prints the current YMDHMS date as a time stamp. 
Example: 31 May 2001 09:45:54 AM Modified: 24 September 2003 Author: John Burkardt Parameters: None */ { # define TIME_SIZE 40 static char time_buffer[TIME_SIZE]; const struct tm *tm; time_t now; now = time ( NULL ); tm = localtime ( &now ); strftime ( time_buffer, TIME_SIZE, "%d %B %Y %I:%M:%S %p", tm ); printf ( "%s\n", time_buffer ); return; # undef TIME_SIZE }
GB_unaryop__identity_int8_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_int8_uint16
// op(A') function:  GB_tran__identity_int8_uint16

// C type:   int8_t
// A type:   uint16_t
// cast:     int8_t cij = (int8_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x)   \
    z = x ;

// casting
#define GB_CASTING(z, aij)   \
    int8_t z = (int8_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__identity_int8_uint16
(
    int8_t *Cx,       // Cx and Ax may be aliased
    uint16_t *Ax,
    int64_t anz,      // number of entries to apply the operator to
    int nthreads      // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // each p writes only Cx [p]; iterations are independent
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__identity_int8_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is shared; the macros above specialize it
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
omp.c
// note not doing O0 below as to ensure we get tbaa // TODO: %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O1 -disable-llvm-optzns %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -S | %clang -fopenmp -x ir - -o %s.out && %s.out // RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O1 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi // RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O2 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi // RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O3 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi // note not doing O0 below as to ensure we get tbaa // TODO: %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O1 -Xclang -disable-llvm-optzns %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -enzyme-inline=1 -S | %clang -fopenmp -x ir - -o %s.out && %s.out // RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O1 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -enzyme-inline=1 -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi // RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O2 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -enzyme-inline=1 -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi // RUN: if [ %llvmver -ge 9 ]; then %clang -fopenmp -std=c11 -fno-vectorize -fno-unroll-loops -O3 %s -S -emit-llvm -o - | %opt - %loadEnzyme -enzyme -enzyme-inline=1 -S | %clang -fopenmp -x ir - -o %s.out && %s.out ; fi #include <stdio.h> #include <math.h> #include <assert.h> #include "test_utils.h" double __enzyme_autodiff(void*, ...); /* void omp(float& a, int N) { #define N 20 #pragma omp parallel for for (int i=0; i<N; i++) { 
//a[i] *= a[i]; (&a)[i] *= (&a)[i]; } #undef N (&a)[0] = 0; } */ void omp(float* a, int N) { #pragma omp parallel for for (int i=0; i<N; i++) { //a[i] *= a[i]; a[i] *= a[i]; } a[0] = 0; } int main(int argc, char** argv) { int N = 20; float a[N]; for(int i=0; i<N; i++) { a[i] = i+1; } float d_a[N]; for(int i=0; i<N; i++) d_a[i] = 1.0f; //omp(*a, N); printf("ran omp\n"); __enzyme_autodiff((void*)omp, a, d_a, N); for(int i=0; i<N; i++) { printf("a[%d]=%f d_a[%d]=%f\n", i, a[i], i, d_a[i]); } //APPROX_EQ(da, 17711.0*2, 1e-10); //APPROX_EQ(db, 17711.0*2, 1e-10); //printf("hello! %f, res2 %f, da: %f, db: %f\n", ret, ret, da,db); APPROX_EQ(d_a[0], 0.0f, 1e-10); for(int i=1; i<N; i++) { APPROX_EQ(d_a[i], 2.0f*(i+1), 1e-10); } return 0; }
DRB037-truedepseconddimension-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
Only the outmost loop can be parallelized in this program.
The inner loop has true dependence.
Data race pair: b[i][j]@63:7 vs. b[i][j-1]@63:15
*/
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
double b[1000][1000];

int main(int argc,char *argv[])
{
  int i;
  int j;
  int n = 1000;
  int m = 1000;

  /* Initialization: rows and columns are fully independent here, so
     parallelizing both loop levels is race-free. */
#pragma omp parallel for private (i,j)
  for (i = 0; i <= n - 1; i += 1) {

#pragma omp parallel for private (j)
    for (j = 1; j <= m - 1; j += 1) {
      b[i][j] = (i * m + j);
    }
  }

  /* INTENTIONAL data race (this is a DataRaceBench "yes"-race kernel):
     only the outer i-loop is parallelized, but the inner j-loop carries a
     true dependence b[i][j] <- b[i][j-1].  Do NOT "fix" this loop nest --
     the race is the property this benchmark exists to expose. */
#pragma omp parallel for private (i,j)
  for (i = 0; i <= n - 1; i += 1) {
    for (j = 1; j <= m - 1; j += 1) {
      b[i][j] = b[i][j - 1];
    }
  }

  /* Serial dump of the result for external comparison. */
  for (i = 0; i <= n - 1; i += 1) {
    for (j = 1; j <= m - 1; j += 1) {
      printf("%lf\n",b[i][j]);
    }
  }
  return 0;
}
GB_unaryop__minv_fp64_fp64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_fp64_fp64 // op(A') function: GB_tran__minv_fp64_fp64 // C type: double // A type: double // cast: double cij = (double) aij // unaryop: cij = 1./aij #define GB_ATYPE \ double #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = 1./x ; // casting #define GB_CASTING(z, x) \ double z = (double) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_fp64_fp64 ( double *restrict Cx, const double *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_fp64_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
omp_lock.c
#include <stdio.h> #include <omp.h> #include "omp_testsuite.h" int check_omp_lock (FILE * logFile) { omp_lock_t lck; int nr_threads_in_single = 0; int result = 0; int nr_iterations = 0; int i; omp_init_lock (&lck); #pragma omp parallel shared(lck) { #pragma omp for for (i = 0; i < LOOPCOUNT; i++) { omp_set_lock (&lck); #pragma omp flush nr_threads_in_single++; #pragma omp flush nr_iterations++; nr_threads_in_single--; result = result + nr_threads_in_single; omp_unset_lock (&lck); } } omp_destroy_lock (&lck); return ((result == 0) && (nr_iterations == LOOPCOUNT)); } int crosscheck_omp_lock (FILE * logFile) { omp_lock_t lck; int nr_threads_in_single = 0; int result = 0; int nr_iterations = 0; int i; omp_init_lock (&lck); #pragma omp parallel shared(lck) { #pragma omp for for (i = 0; i < LOOPCOUNT; i++) { /*omp_set_lock(&lck); */ #pragma omp flush nr_threads_in_single++; #pragma omp flush nr_iterations++; nr_threads_in_single--; result = result + nr_threads_in_single; /*omp_unset_lock(&lck); */ } } omp_destroy_lock (&lck); return ((result == 0) && (nr_iterations == LOOPCOUNT)); }
GB_unaryop__abs_int8_int32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__abs_int8_int32
// op(A') function: GB_tran__abs_int8_int32

// C type: int8_t
// A type: int32_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = GB_IABS (aij)

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: integer absolute value
#define GB_OP(z, x) \
    z = GB_IABS (x) ;

// casting -- NOTE: this narrows int32_t to int8_t, so values outside the
// int8 range are truncated per C's conversion rules.
#define GB_CASTING(z, x) \
    int8_t z = (int8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_INT8 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply z = |(int8_t) x| elementwise over the anz entries of Ax, writing
// into Cx, using nthreads OpenMP threads with a static schedule.  Returns
// GrB_NO_VALUE when this hard-coded kernel is compiled out (GB_DISABLE),
// telling the caller to fall back to the generic path.
GrB_Info GB_unop__abs_int8_int32
(
    int8_t *restrict Cx,
    const int32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Fused transpose + apply for the ABS int8<-int32 operator.  The loop body
// lives in GB_unaryop_transpose.c, #include'd below with this file's GB_*
// macros in effect (phase 2 of 2 of the transpose).
GrB_Info GB_tran__abs_int8_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
parallel.h
#pragma once #include "tino/backends/backend_types.h" /* // if both of OpenMP and intel tbb define, abort! #if defined(TINO_OPENMP_READY) && defined(TINO_INTEL_TBB_READY) #error parallel-backend is exclusive each other. #endif */ #include "tino/backends/backends.h" #include "tino/core/core.h" namespace tino { namespace utils { // wrapper function for paralleled_for template <typename Index_t, typename F> void concurrent_for(tino::core::context& ctx, Index_t loop_upper_bound, F Lambda) { switch (ctx.parallelize()) { case backends::parallelize_t::none: for (Index_t i = 0; i < loop_upper_bound; i++) { Lambda(i); } break; case backends::parallelize_t::openmp: #if defined(TINO_OPENMP_READY) #pragma omp parallel for for (Index_t i = 0; i < loop_upper_bound; i++) { Lambda(i); } #else std::cerr << "invalild parallel-backend: OpenMP" << std::endl; std::exit(1); #endif break; case backends::parallelize_t::intel_tbb: #if defined(TINO_INTEL_TBB_READY) tbb::parallel_for(tbb::blocked_range<int>(0, loop_upper_bound), [&](const tbb::blocked_range<int>& r) { for (int list = r.begin(); list < r.end(); list++) { Lambda(list); } }); #else std::cerr << "invalild parallel-backend: Intel TBB" << std::endl; std::exit(1); #endif break; default: break; } TINO_MAYBE_UNUSED(ctx); return; } } // namespace utils } // namespace tino
OpenMPClause.h
//===- OpenMPClause.h - Classes for OpenMP clauses --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // /// \file /// This file defines OpenMP AST classes for clauses. /// There are clauses for executable directives, clauses for declarative /// directives and clauses which can be used in both kinds of directives. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_OPENMPCLAUSE_H #define LLVM_CLANG_AST_OPENMPCLAUSE_H #include "clang/AST/Decl.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/MapVector.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/TrailingObjects.h" #include <cassert> #include <cstddef> #include <iterator> #include <utility> namespace clang { class ASTContext; //===----------------------------------------------------------------------===// // AST classes for clauses. //===----------------------------------------------------------------------===// /// This is a basic class for representing single OpenMP clause. class OMPClause { /// Starting location of the clause (the clause keyword). SourceLocation StartLoc; /// Ending location of the clause. SourceLocation EndLoc; /// Kind of the clause. 
OpenMPClauseKind Kind; protected: OMPClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation EndLoc) : StartLoc(StartLoc), EndLoc(EndLoc), Kind(K) {} public: /// Returns the starting location of the clause. SourceLocation getBeginLoc() const { return StartLoc; } /// Returns the ending location of the clause. SourceLocation getEndLoc() const { return EndLoc; } /// Sets the starting location of the clause. void setLocStart(SourceLocation Loc) { StartLoc = Loc; } /// Sets the ending location of the clause. void setLocEnd(SourceLocation Loc) { EndLoc = Loc; } /// Returns kind of OpenMP clause (private, shared, reduction, etc.). OpenMPClauseKind getClauseKind() const { return Kind; } bool isImplicit() const { return StartLoc.isInvalid(); } using child_iterator = StmtIterator; using const_child_iterator = ConstStmtIterator; using child_range = llvm::iterator_range<child_iterator>; using const_child_range = llvm::iterator_range<const_child_iterator>; child_range children(); const_child_range children() const { auto Children = const_cast<OMPClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *) { return true; } }; /// Class that handles pre-initialization statement for some clauses, like /// 'shedule', 'firstprivate' etc. class OMPClauseWithPreInit { friend class OMPClauseReader; /// Pre-initialization statement for the clause. Stmt *PreInit = nullptr; /// Region that captures the associated stmt. OpenMPDirectiveKind CaptureRegion = OMPD_unknown; protected: OMPClauseWithPreInit(const OMPClause *This) { assert(get(This) && "get is not tuned for pre-init."); } /// Set pre-initialization statement for the clause. void setPreInitStmt(Stmt *S, OpenMPDirectiveKind ThisRegion = OMPD_unknown) { PreInit = S; CaptureRegion = ThisRegion; } public: /// Get pre-initialization statement for the clause. 
const Stmt *getPreInitStmt() const { return PreInit; } /// Get pre-initialization statement for the clause. Stmt *getPreInitStmt() { return PreInit; } /// Get capture region for the stmt in the clause. OpenMPDirectiveKind getCaptureRegion() const { return CaptureRegion; } static OMPClauseWithPreInit *get(OMPClause *C); static const OMPClauseWithPreInit *get(const OMPClause *C); }; /// Class that handles post-update expression for some clauses, like /// 'lastprivate', 'reduction' etc. class OMPClauseWithPostUpdate : public OMPClauseWithPreInit { friend class OMPClauseReader; /// Post-update expression for the clause. Expr *PostUpdate = nullptr; protected: OMPClauseWithPostUpdate(const OMPClause *This) : OMPClauseWithPreInit(This) { assert(get(This) && "get is not tuned for post-update."); } /// Set pre-initialization statement for the clause. void setPostUpdateExpr(Expr *S) { PostUpdate = S; } public: /// Get post-update expression for the clause. const Expr *getPostUpdateExpr() const { return PostUpdate; } /// Get post-update expression for the clause. Expr *getPostUpdateExpr() { return PostUpdate; } static OMPClauseWithPostUpdate *get(OMPClause *C); static const OMPClauseWithPostUpdate *get(const OMPClause *C); }; /// This represents clauses with the list of variables like 'private', /// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the /// '#pragma omp ...' directives. template <class T> class OMPVarListClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Number of variables in the list. unsigned NumVars; protected: /// Build a clause with \a N variables /// /// \param K Kind of the clause. /// \param StartLoc Starting location of the clause (the clause keyword). /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. 
OMPVarListClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPClause(K, StartLoc, EndLoc), LParenLoc(LParenLoc), NumVars(N) {} /// Fetches list of variables associated with this clause. MutableArrayRef<Expr *> getVarRefs() { return MutableArrayRef<Expr *>( static_cast<T *>(this)->template getTrailingObjects<Expr *>(), NumVars); } /// Sets the list of variables for this clause. void setVarRefs(ArrayRef<Expr *> VL) { assert(VL.size() == NumVars && "Number of variables is not the same as the preallocated buffer"); std::copy(VL.begin(), VL.end(), static_cast<T *>(this)->template getTrailingObjects<Expr *>()); } public: using varlist_iterator = MutableArrayRef<Expr *>::iterator; using varlist_const_iterator = ArrayRef<const Expr *>::iterator; using varlist_range = llvm::iterator_range<varlist_iterator>; using varlist_const_range = llvm::iterator_range<varlist_const_iterator>; unsigned varlist_size() const { return NumVars; } bool varlist_empty() const { return NumVars == 0; } varlist_range varlists() { return varlist_range(varlist_begin(), varlist_end()); } varlist_const_range varlists() const { return varlist_const_range(varlist_begin(), varlist_end()); } varlist_iterator varlist_begin() { return getVarRefs().begin(); } varlist_iterator varlist_end() { return getVarRefs().end(); } varlist_const_iterator varlist_begin() const { return getVarRefs().begin(); } varlist_const_iterator varlist_end() const { return getVarRefs().end(); } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Fetches list of all variables in the clause. ArrayRef<const Expr *> getVarRefs() const { return llvm::makeArrayRef( static_cast<const T *>(this)->template getTrailingObjects<Expr *>(), NumVars); } }; /// This represents 'if' clause in the '#pragma omp ...' directive. 
/// /// \code /// #pragma omp parallel if(parallel:a > 5) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'if' clause with /// condition 'a > 5' and directive name modifier 'parallel'. class OMPIfClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Condition of the 'if' clause. Stmt *Condition = nullptr; /// Location of ':' (if any). SourceLocation ColonLoc; /// Directive name modifier for the clause. OpenMPDirectiveKind NameModifier = OMPD_unknown; /// Name modifier location. SourceLocation NameModifierLoc; /// Set condition. void setCondition(Expr *Cond) { Condition = Cond; } /// Set directive name modifier for the clause. void setNameModifier(OpenMPDirectiveKind NM) { NameModifier = NM; } /// Set location of directive name modifier for the clause. void setNameModifierLoc(SourceLocation Loc) { NameModifierLoc = Loc; } /// Set location of ':'. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } public: /// Build 'if' clause with condition \a Cond. /// /// \param NameModifier [OpenMP 4.1] Directive name modifier of clause. /// \param Cond Condition of the clause. /// \param HelperCond Helper condition for the clause. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param NameModifierLoc Location of directive name modifier. /// \param ColonLoc [OpenMP 4.1] Location of ':'. /// \param EndLoc Ending location of the clause. 
OMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Cond, Stmt *HelperCond, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc) : OMPClause(OMPC_if, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond), ColonLoc(ColonLoc), NameModifier(NameModifier), NameModifierLoc(NameModifierLoc) { setPreInitStmt(HelperCond, CaptureRegion); } /// Build an empty clause. OMPIfClause() : OMPClause(OMPC_if, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return the location of ':'. SourceLocation getColonLoc() const { return ColonLoc; } /// Returns condition. Expr *getCondition() const { return cast_or_null<Expr>(Condition); } /// Return directive name modifier associated with the clause. OpenMPDirectiveKind getNameModifier() const { return NameModifier; } /// Return the location of directive name modifier. SourceLocation getNameModifierLoc() const { return NameModifierLoc; } child_range children() { return child_range(&Condition, &Condition + 1); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_if; } }; /// This represents 'final' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp task final(a > 5) /// \endcode /// In this example directive '#pragma omp task' has simple 'final' /// clause with condition 'a > 5'. class OMPFinalClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Condition of the 'if' clause. Stmt *Condition = nullptr; /// Set condition. void setCondition(Expr *Cond) { Condition = Cond; } public: /// Build 'final' clause with condition \a Cond. /// /// \param StartLoc Starting location of the clause. 
/// \param LParenLoc Location of '('. /// \param Cond Condition of the clause. /// \param EndLoc Ending location of the clause. OMPFinalClause(Expr *Cond, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(OMPC_final, StartLoc, EndLoc), LParenLoc(LParenLoc), Condition(Cond) {} /// Build an empty clause. OMPFinalClause() : OMPClause(OMPC_final, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns condition. Expr *getCondition() const { return cast_or_null<Expr>(Condition); } child_range children() { return child_range(&Condition, &Condition + 1); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_final; } }; /// This represents 'num_threads' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp parallel num_threads(6) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'num_threads' /// clause with number of threads '6'. class OMPNumThreadsClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Condition of the 'num_threads' clause. Stmt *NumThreads = nullptr; /// Set condition. void setNumThreads(Expr *NThreads) { NumThreads = NThreads; } public: /// Build 'num_threads' clause with condition \a NumThreads. /// /// \param NumThreads Number of threads for the construct. /// \param HelperNumThreads Helper Number of threads for the construct. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. 
OMPNumThreadsClause(Expr *NumThreads, Stmt *HelperNumThreads,
                    OpenMPDirectiveKind CaptureRegion,
                    SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
    : OMPClause(OMPC_num_threads, StartLoc, EndLoc),
      OMPClauseWithPreInit(this), LParenLoc(LParenLoc),
      NumThreads(NumThreads) {
  setPreInitStmt(HelperNumThreads, CaptureRegion);
}

/// Build an empty clause.
OMPNumThreadsClause()
    : OMPClause(OMPC_num_threads, SourceLocation(), SourceLocation()),
      OMPClauseWithPreInit(this) {}

/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }

/// Returns number of threads.
Expr *getNumThreads() const { return cast_or_null<Expr>(NumThreads); }

child_range children() { return child_range(&NumThreads, &NumThreads + 1); }

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == OMPC_num_threads;
}
};

/// This represents 'safelen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd safelen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'safelen'
/// with single expression '4'.
/// If the safelen clause is used then no two iterations executed
/// concurrently with SIMD instructions can have a greater distance
/// in the logical iteration space than its value. The parameter of
/// the safelen clause must be a constant positive integer expression.
class OMPSafelenClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Safe iteration space distance.
  Stmt *Safelen = nullptr;

  /// Set safelen.
  void setSafelen(Expr *Len) { Safelen = Len; }

public:
  /// Build 'safelen' clause.
  ///
  /// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPSafelenClause(Expr *Len, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_safelen, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Safelen(Len) {}

  /// Build an empty clause.
  explicit OMPSafelenClause()
      : OMPClause(OMPC_safelen, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return safe iteration space distance.
  Expr *getSafelen() const { return cast_or_null<Expr>(Safelen); }

  child_range children() { return child_range(&Safelen, &Safelen + 1); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_safelen;
  }
};

/// This represents 'simdlen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd simdlen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'simdlen'
/// with single expression '4'.
/// If the 'simdlen' clause is used then it specifies the preferred number of
/// iterations to be executed concurrently. The parameter of the 'simdlen'
/// clause must be a constant positive integer expression.
class OMPSimdlenClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Preferred number of iterations to be executed concurrently.
  Stmt *Simdlen = nullptr;

  /// Set simdlen.
  void setSimdlen(Expr *Len) { Simdlen = Len; }

public:
  /// Build 'simdlen' clause.
  ///
  /// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPSimdlenClause(Expr *Len, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_simdlen, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Simdlen(Len) {}

  /// Build an empty clause.
  explicit OMPSimdlenClause()
      : OMPClause(OMPC_simdlen, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the preferred number of iterations to be executed concurrently.
  Expr *getSimdlen() const { return cast_or_null<Expr>(Simdlen); }

  child_range children() { return child_range(&Simdlen, &Simdlen + 1); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_simdlen;
  }
};

/// This represents 'collapse' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd collapse(3)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'collapse'
/// with single expression '3'.
/// The parameter must be a constant positive integer expression, it specifies
/// the number of nested loops that should be collapsed into a single iteration
/// space.
class OMPCollapseClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number of for-loops.
  Stmt *NumForLoops = nullptr;

  /// Set the number of associated for-loops.
  void setNumForLoops(Expr *Num) { NumForLoops = Num; }

public:
  /// Build 'collapse' clause.
  ///
  /// \param Num Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPCollapseClause(Expr *Num, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_collapse, StartLoc, EndLoc), LParenLoc(LParenLoc),
        NumForLoops(Num) {}

  /// Build an empty clause.
  explicit OMPCollapseClause()
      : OMPClause(OMPC_collapse, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the number of associated for-loops.
Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }

child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); }

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == OMPC_collapse;
}
};

/// This represents 'default' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel default(shared)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'default'
/// clause with kind 'shared'.
class OMPDefaultClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'default' clause.
  OpenMPDefaultClauseKind Kind = OMPC_DEFAULT_unknown;

  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// Set kind of the clauses.
  ///
  /// \param K Argument of clause.
  void setDefaultKind(OpenMPDefaultClauseKind K) { Kind = K; }

  /// Set argument location.
  ///
  /// \param KLoc Argument location.
  void setDefaultKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }

public:
  /// Build 'default' clause with argument \a A ('none' or 'shared').
  ///
  /// \param A Argument of the clause ('none' or 'shared').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDefaultClause(OpenMPDefaultClauseKind A, SourceLocation ALoc,
                   SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(OMPC_default, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Kind(A), KindKwLoc(ALoc) {}

  /// Build an empty clause.
  OMPDefaultClause()
      : OMPClause(OMPC_default, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns kind of the clause.
  OpenMPDefaultClauseKind getDefaultKind() const { return Kind; }

  /// Returns location of clause kind.
  SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_default;
  }
};

/// This represents 'proc_bind' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp parallel proc_bind(master)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'proc_bind'
/// clause with kind 'master'.
class OMPProcBindClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'proc_bind' clause.
  OpenMPProcBindClauseKind Kind = OMPC_PROC_BIND_unknown;

  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// Set kind of the clause.
  ///
  /// \param K Kind of clause.
  void setProcBindKind(OpenMPProcBindClauseKind K) { Kind = K; }

  /// Set clause kind location.
  ///
  /// \param KLoc Kind location.
  void setProcBindKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }

public:
  /// Build 'proc_bind' clause with argument \a A ('master', 'close' or
  /// 'spread').
  ///
  /// \param A Argument of the clause ('master', 'close' or 'spread').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPProcBindClause(OpenMPProcBindClauseKind A, SourceLocation ALoc,
                    SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(OMPC_proc_bind, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Kind(A), KindKwLoc(ALoc) {}

  /// Build an empty clause.
  OMPProcBindClause()
      : OMPClause(OMPC_proc_bind, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns kind of the clause.
  OpenMPProcBindClauseKind getProcBindKind() const { return Kind; }

  /// Returns location of clause kind.
  SourceLocation getProcBindKindKwLoc() const { return KindKwLoc; }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_proc_bind;
  }
};

/// This represents 'unified_address' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires unified_address
/// \endcode
/// In this example directive '#pragma omp requires' has 'unified_address'
/// clause.
class OMPUnifiedAddressClause final : public OMPClause {
public:
  friend class OMPClauseReader;

  /// Build 'unified_address' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_unified_address, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPUnifiedAddressClause()
      : OMPClause(OMPC_unified_address, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_unified_address;
  }
};

/// This represents 'unified_shared_memory' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires unified_shared_memory
/// \endcode
/// In this example directive '#pragma omp requires' has 'unified_shared_memory'
/// clause.
class OMPUnifiedSharedMemoryClause final : public OMPClause {
public:
  friend class OMPClauseReader;

  /// Build 'unified_shared_memory' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
OMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc)
    : OMPClause(OMPC_unified_shared_memory, StartLoc, EndLoc) {}

/// Build an empty clause.
OMPUnifiedSharedMemoryClause()
    : OMPClause(OMPC_unified_shared_memory, SourceLocation(),
                SourceLocation()) {}

child_range children() {
  return child_range(child_iterator(), child_iterator());
}

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == OMPC_unified_shared_memory;
}
};

/// This represents 'reverse_offload' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires reverse_offload
/// \endcode
/// In this example directive '#pragma omp requires' has 'reverse_offload'
/// clause.
class OMPReverseOffloadClause final : public OMPClause {
public:
  friend class OMPClauseReader;

  /// Build 'reverse_offload' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_reverse_offload, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPReverseOffloadClause()
      : OMPClause(OMPC_reverse_offload, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_reverse_offload;
  }
};

/// This represents 'dynamic_allocators' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires dynamic_allocators
/// \endcode
/// In this example directive '#pragma omp requires' has 'dynamic_allocators'
/// clause.
class OMPDynamicAllocatorsClause final : public OMPClause {
public:
  friend class OMPClauseReader;

  /// Build 'dynamic_allocators' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_dynamic_allocators, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPDynamicAllocatorsClause()
      : OMPClause(OMPC_dynamic_allocators, SourceLocation(),
                  SourceLocation()) { }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_dynamic_allocators;
  }
};

/// This represents 'atomic_default_mem_order' clause in the '#pragma omp
/// requires' directive.
///
/// \code
/// #pragma omp requires atomic_default_mem_order(seq_cst)
/// \endcode
/// In this example directive '#pragma omp requires' has simple
/// 'atomic_default_mem_order' clause with kind 'seq_cst'.
class OMPAtomicDefaultMemOrderClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'atomic_default_mem_order' clause.
  OpenMPAtomicDefaultMemOrderClauseKind Kind =
      OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown;

  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// Set kind of the clause.
  ///
  /// \param K Kind of clause.
  void setAtomicDefaultMemOrderKind(OpenMPAtomicDefaultMemOrderClauseKind K) {
    Kind = K;
  }

  /// Set clause kind location.
  ///
  /// \param KLoc Kind location.
  void setAtomicDefaultMemOrderKindKwLoc(SourceLocation KLoc) {
    KindKwLoc = KLoc;
  }

public:
  /// Build 'atomic_default_mem_order' clause with argument \a A ('seq_cst',
  /// 'acq_rel' or 'relaxed').
  ///
  /// \param A Argument of the clause ('seq_cst', 'acq_rel' or 'relaxed').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPAtomicDefaultMemOrderClause(OpenMPAtomicDefaultMemOrderClauseKind A,
                                 SourceLocation ALoc, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc)
      : OMPClause(OMPC_atomic_default_mem_order, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}

  /// Build an empty clause.
  OMPAtomicDefaultMemOrderClause()
      : OMPClause(OMPC_atomic_default_mem_order, SourceLocation(),
                  SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns kind of the clause.
  OpenMPAtomicDefaultMemOrderClauseKind getAtomicDefaultMemOrderKind() const {
    return Kind;
  }

  /// Returns location of clause kind.
  SourceLocation getAtomicDefaultMemOrderKindKwLoc() const {
    return KindKwLoc;
  }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_atomic_default_mem_order;
  }
};

/// This represents 'schedule' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp for' has 'schedule' clause with
/// arguments 'static' and '3'.
class OMPScheduleClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'schedule' clause.
  OpenMPScheduleClauseKind Kind = OMPC_SCHEDULE_unknown;

  /// Modifiers for 'schedule' clause.
  enum {FIRST, SECOND, NUM_MODIFIERS};
  OpenMPScheduleClauseModifier Modifiers[NUM_MODIFIERS];

  /// Locations of modifiers.
  SourceLocation ModifiersLoc[NUM_MODIFIERS];

  /// Start location of the schedule kind in source code.
  SourceLocation KindLoc;

  /// Location of ',' (if any).
  SourceLocation CommaLoc;

  /// Chunk size.
  Expr *ChunkSize = nullptr;

  /// Set schedule kind.
  ///
  /// \param K Schedule kind.
void setScheduleKind(OpenMPScheduleClauseKind K) { Kind = K; }

/// Set the first schedule modifier.
///
/// \param M Schedule modifier.
void setFirstScheduleModifier(OpenMPScheduleClauseModifier M) {
  Modifiers[FIRST] = M;
}

/// Set the second schedule modifier.
///
/// \param M Schedule modifier.
void setSecondScheduleModifier(OpenMPScheduleClauseModifier M) {
  Modifiers[SECOND] = M;
}

/// Set location of the first schedule modifier.
void setFirstScheduleModifierLoc(SourceLocation Loc) {
  ModifiersLoc[FIRST] = Loc;
}

/// Set location of the second schedule modifier.
void setSecondScheduleModifierLoc(SourceLocation Loc) {
  ModifiersLoc[SECOND] = Loc;
}

/// Set a schedule modifier: fills the first unset modifier slot, asserting
/// if both slots are already occupied.
///
/// \param M Schedule modifier.
// NOTE(review): the method name is misspelled ("Modifer"); it is kept as-is
// because renaming would break existing callers of this public interface.
void setScheduleModifer(OpenMPScheduleClauseModifier M) {
  if (Modifiers[FIRST] == OMPC_SCHEDULE_MODIFIER_unknown)
    Modifiers[FIRST] = M;
  else {
    assert(Modifiers[SECOND] == OMPC_SCHEDULE_MODIFIER_unknown);
    Modifiers[SECOND] = M;
  }
}

/// Sets the location of '('.
///
/// \param Loc Location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

/// Set schedule kind start location.
///
/// \param KLoc Schedule kind location.
void setScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

/// Set location of ','.
///
/// \param Loc Location of ','.
void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }

/// Set chunk size.
///
/// \param E Chunk size.
void setChunkSize(Expr *E) { ChunkSize = E; }

public:
/// Build 'schedule' clause with schedule kind \a Kind and chunk size
/// expression \a ChunkSize.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param KLoc Starting location of the argument.
/// \param CommaLoc Location of ','.
/// \param EndLoc Ending location of the clause.
/// \param Kind Schedule kind.
/// \param ChunkSize Chunk size.
/// \param HelperChunkSize Helper chunk size for combined directives.
/// \param M1 The first modifier applied to 'schedule' clause.
/// \param M1Loc Location of the first modifier
/// \param M2 The second modifier applied to 'schedule' clause.
/// \param M2Loc Location of the second modifier
OMPScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation KLoc, SourceLocation CommaLoc,
                  SourceLocation EndLoc, OpenMPScheduleClauseKind Kind,
                  Expr *ChunkSize, Stmt *HelperChunkSize,
                  OpenMPScheduleClauseModifier M1, SourceLocation M1Loc,
                  OpenMPScheduleClauseModifier M2, SourceLocation M2Loc)
    : OMPClause(OMPC_schedule, StartLoc, EndLoc), OMPClauseWithPreInit(this),
      LParenLoc(LParenLoc), Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc),
      ChunkSize(ChunkSize) {
  setPreInitStmt(HelperChunkSize);
  Modifiers[FIRST] = M1;
  Modifiers[SECOND] = M2;
  ModifiersLoc[FIRST] = M1Loc;
  ModifiersLoc[SECOND] = M2Loc;
}

/// Build an empty clause.
explicit OMPScheduleClause()
    : OMPClause(OMPC_schedule, SourceLocation(), SourceLocation()),
      OMPClauseWithPreInit(this) {
  Modifiers[FIRST] = OMPC_SCHEDULE_MODIFIER_unknown;
  Modifiers[SECOND] = OMPC_SCHEDULE_MODIFIER_unknown;
}

/// Get kind of the clause.
OpenMPScheduleClauseKind getScheduleKind() const { return Kind; }

/// Get the first modifier of the clause.
OpenMPScheduleClauseModifier getFirstScheduleModifier() const {
  return Modifiers[FIRST];
}

/// Get the second modifier of the clause.
OpenMPScheduleClauseModifier getSecondScheduleModifier() const {
  return Modifiers[SECOND];
}

/// Get location of '('.
SourceLocation getLParenLoc() { return LParenLoc; }

/// Get kind location.
SourceLocation getScheduleKindLoc() { return KindLoc; }

/// Get the first modifier location.
SourceLocation getFirstScheduleModifierLoc() const {
  return ModifiersLoc[FIRST];
}

/// Get the second modifier location.
SourceLocation getSecondScheduleModifierLoc() const {
  return ModifiersLoc[SECOND];
}

/// Get location of ','.
SourceLocation getCommaLoc() { return CommaLoc; }

/// Get chunk size.
Expr *getChunkSize() { return ChunkSize; }

/// Get chunk size.
const Expr *getChunkSize() const { return ChunkSize; }

child_range children() {
  return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
                     reinterpret_cast<Stmt **>(&ChunkSize) + 1);
}

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == OMPC_schedule;
}
};

/// This represents 'ordered' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for ordered (2)
/// \endcode
/// In this example directive '#pragma omp for' has 'ordered' clause with
/// parameter 2.
class OMPOrderedClause final
    : public OMPClause,
      private llvm::TrailingObjects<OMPOrderedClause, Expr *> {
  friend class OMPClauseReader;
  friend TrailingObjects;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number of for-loops.
  Stmt *NumForLoops = nullptr;

  /// Real number of loops.
  unsigned NumberOfLoops = 0;

  /// Build 'ordered' clause.
  ///
  /// \param Num Expression, possibly associated with this clause.
  /// \param NumLoops Number of loops, associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPOrderedClause(Expr *Num, unsigned NumLoops, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_ordered, StartLoc, EndLoc), LParenLoc(LParenLoc),
        NumForLoops(Num), NumberOfLoops(NumLoops) {}

  /// Build an empty clause.
  explicit OMPOrderedClause(unsigned NumLoops)
      : OMPClause(OMPC_ordered, SourceLocation(), SourceLocation()),
        NumberOfLoops(NumLoops) {}

  /// Set the number of associated for-loops.
  void setNumForLoops(Expr *Num) { NumForLoops = Num; }

public:
  /// Build 'ordered' clause.
  ///
  /// \param Num Expression, possibly associated with this clause.
  /// \param NumLoops Number of loops, associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
static OMPOrderedClause *Create(const ASTContext &C, Expr *Num,
                                unsigned NumLoops, SourceLocation StartLoc,
                                SourceLocation LParenLoc,
                                SourceLocation EndLoc);

/// Build an empty clause.
static OMPOrderedClause* CreateEmpty(const ASTContext &C, unsigned NumLoops);

/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }

/// Return the number of associated for-loops.
Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }

/// Set number of iterations for the specified loop.
void setLoopNumIterations(unsigned NumLoop, Expr *NumIterations);
/// Get number of iterations for all the loops.
ArrayRef<Expr *> getLoopNumIterations() const;

/// Set loop counter for the specified loop.
void setLoopCounter(unsigned NumLoop, Expr *Counter);
/// Get loops counter for the specified loop.
Expr *getLoopCounter(unsigned NumLoop);
const Expr *getLoopCounter(unsigned NumLoop) const;

child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); }

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == OMPC_ordered;
}
};

/// This represents 'nowait' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for nowait
/// \endcode
/// In this example directive '#pragma omp for' has 'nowait' clause.
class OMPNowaitClause : public OMPClause {
public:
  /// Build 'nowait' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_nowait, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPNowaitClause()
      : OMPClause(OMPC_nowait, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_nowait;
  }
};

/// This represents 'untied' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task untied
/// \endcode
/// In this example directive '#pragma omp task' has 'untied' clause.
class OMPUntiedClause : public OMPClause {
public:
  /// Build 'untied' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_untied, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPUntiedClause()
      : OMPClause(OMPC_untied, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_untied;
  }
};

/// This represents 'mergeable' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp task mergeable
/// \endcode
/// In this example directive '#pragma omp task' has 'mergeable' clause.
class OMPMergeableClause : public OMPClause {
public:
  /// Build 'mergeable' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_mergeable, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPMergeableClause()
      : OMPClause(OMPC_mergeable, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_mergeable;
  }
};

/// This represents 'read' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic read
/// \endcode
/// In this example directive '#pragma omp atomic' has 'read' clause.
class OMPReadClause : public OMPClause {
public:
  /// Build 'read' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_read, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPReadClause() : OMPClause(OMPC_read, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_read;
  }
};

/// This represents 'write' clause in the '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic write
/// \endcode
/// In this example directive '#pragma omp atomic' has 'write' clause.
class OMPWriteClause : public OMPClause {
public:
  /// Build 'write' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_write, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPWriteClause()
      : OMPClause(OMPC_write, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_write;
  }
};

/// This represents 'update' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic update
/// \endcode
/// In this example directive '#pragma omp atomic' has 'update' clause.
class OMPUpdateClause : public OMPClause {
public:
  /// Build 'update' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_update, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPUpdateClause()
      : OMPClause(OMPC_update, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_update;
  }
};

/// This represents 'capture' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has 'capture' clause.
class OMPCaptureClause : public OMPClause {
public:
  /// Build 'capture' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_capture, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPCaptureClause()
      : OMPClause(OMPC_capture, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_capture;
  }
};

/// This represents 'seq_cst' clause in the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic seq_cst
/// \endcode
/// In this example directive '#pragma omp atomic' has 'seq_cst' clause.
class OMPSeqCstClause : public OMPClause {
public:
  /// Build 'seq_cst' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_seq_cst, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPSeqCstClause()
      : OMPClause(OMPC_seq_cst, SourceLocation(), SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_seq_cst;
  }
};

/// This represents clause 'private' in the '#pragma omp ...' directives.
/// /// \code /// #pragma omp parallel private(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'private' /// with the variables 'a' and 'b'. class OMPPrivateClause final : public OMPVarListClause<OMPPrivateClause>, private llvm::TrailingObjects<OMPPrivateClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPPrivateClause>(OMPC_private, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPPrivateClause(unsigned N) : OMPVarListClause<OMPPrivateClause>(OMPC_private, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// Sets the list of references to private copies with initializers for /// new private variables. /// \param VL List of references. void setPrivateCopies(ArrayRef<Expr *> VL); /// Gets the list of references to private copies with initializers for /// new private variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param PrivateVL List of references to private copies with initializers. 
  static OMPPrivateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc, ArrayRef<Expr *> VL,
                                  ArrayRef<Expr *> PrivateVL);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPPrivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }

  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  // Only the original variable list is exposed as children; the helper
  // private-copy expressions are not visited.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  // LLVM-style RTTI discriminator.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_private;
  }
};

/// This represents clause 'firstprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel firstprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'firstprivate'
/// with the variables 'a' and 'b'.
class OMPFirstprivateClause final
    : public OMPVarListClause<OMPFirstprivateClause>,
      public OMPClauseWithPreInit,
      private llvm::TrailingObjects<OMPFirstprivateClause, Expr *> {
  // Tail-allocated storage layout: three consecutive arrays of
  // varlist_size() expressions each — the original variables (the var-list
  // itself), the private copies, and the initializer expressions. The
  // accessors below compute their slices from that layout; do not reorder.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPFirstprivateClause>(OMPC_firstprivate, StartLoc,
                                                LParenLoc, EndLoc, N),
        OMPClauseWithPreInit(this) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPFirstprivateClause(unsigned N)
      : OMPVarListClause<OMPFirstprivateClause>(
            OMPC_firstprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPreInit(this) {}

  /// Sets the list of references to private copies with initializers for
  /// new private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for
  /// new private variables.
  // The private copies immediately follow the var-list in tail storage.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Sets the list of references to initializer variables for new
  /// private variables.
  /// \param VL List of references.
  void setInits(ArrayRef<Expr *> VL);

  /// Gets the list of references to initializer variables for new
  /// private variables.
  // The init expressions immediately follow the private copies.
  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the original variables.
  /// \param PrivateVL List of references to private copies with initializers.
  /// \param InitVL List of references to auto generated variables used for
  /// initialization of a single array element. Used if firstprivate variable is
  /// of array type.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  static OMPFirstprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL,
         ArrayRef<Expr *> InitVL, Stmt *PreInit);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPFirstprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }

  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  using inits_iterator = MutableArrayRef<Expr *>::iterator;
  using inits_const_iterator = ArrayRef<const Expr *>::iterator;
  using inits_range = llvm::iterator_range<inits_iterator>;
  using inits_const_range = llvm::iterator_range<inits_const_iterator>;

  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }

  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  // Only the original variable list is exposed as children; the helper
  // arrays (private copies, inits) are not visited.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  // LLVM-style RTTI discriminator.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_firstprivate;
  }
};

/// This represents clause 'lastprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd lastprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'lastprivate'
/// with the variables 'a' and 'b'.
class OMPLastprivateClause final
    : public OMPVarListClause<OMPLastprivateClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPLastprivateClause, Expr *> {
  // There are 4 additional tail-allocated arrays at the end of the class:
  // 1. Contains list of pseudo variables with the default initialization for
  // each non-firstprivate variables. Used in codegen for initialization of
  // lastprivate copies.
  // 2. List of helper expressions for proper generation of assignment operation
  // required for lastprivate clause. This list represents private variables
  // (for arrays, single array element).
  // 3. List of helper expressions for proper generation of assignment operation
  // required for lastprivate clause. This list represents original variables
  // (for arrays, single array element).
  // 4. List of helper expressions that represents assignment operation:
  // \code
  // DstExprs = SrcExprs;
  // \endcode
  // Required for proper codegen of final assignment performed by the
  // lastprivate clause.
  // Each of the 4 arrays holds varlist_size() expressions and the accessors
  // below slice them out of tail storage in exactly this order; do not
  // reorder the accessor chain.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPLastprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPLastprivateClause>(OMPC_lastprivate, StartLoc,
                                               LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPLastprivateClause(unsigned N)
      : OMPVarListClause<OMPLastprivateClause>(
            OMPC_lastprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Get the list of helper expressions for initialization of private
  /// copies for lastprivate variables.
  // Array 1: immediately follows the var-list in tail storage.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private variables (for arrays, single
  /// array element) in the final assignment statement performed by the
  /// lastprivate clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// Get the list of helper source expressions.
  // Array 2: follows the private copies.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent original variables (for arrays, single
  /// array element) in the final assignment statement performed by the
  /// lastprivate clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// Get the list of helper destination expressions.
  // Array 3: follows the source expressions.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign private copy of the variable to original variable.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// Get the list of helper assignment expressions.
  // Array 4: follows the destination expressions.
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for lastprivate clause. This list represents
  /// private variables (for arrays, single array element).
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for lastprivate clause. This list represents
  /// original variables (for arrays, single array element).
  /// \param AssignmentOps List of helper expressions that represents assignment
  /// operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of final assignment performed by the
  /// lastprivate clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPLastprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps,
         Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPLastprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  /// Set list of helper expressions, required for generation of private
  /// copies of original lastprivate variables.
  void setPrivateCopies(ArrayRef<Expr *> PrivateCopies);

  helper_expr_const_range private_copies() const {
    return helper_expr_const_range(getPrivateCopies().begin(),
                                   getPrivateCopies().end());
  }

  helper_expr_range private_copies() {
    return helper_expr_range(getPrivateCopies().begin(),
                             getPrivateCopies().end());
  }

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }

  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }

  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }

  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }

  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }

  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  // Only the original variable list is exposed as children; the 4 helper
  // arrays are not visited.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  // LLVM-style RTTI discriminator.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_lastprivate;
  }
};

/// This represents clause 'shared' in the '#pragma omp ...' directives.
/// /// \code /// #pragma omp parallel shared(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'shared' /// with the variables 'a' and 'b'. class OMPSharedClause final : public OMPVarListClause<OMPSharedClause>, private llvm::TrailingObjects<OMPSharedClause, Expr *> { friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPSharedClause>(OMPC_shared, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPSharedClause(unsigned N) : OMPVarListClause<OMPSharedClause>(OMPC_shared, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. static OMPSharedClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPSharedClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } static bool classof(const OMPClause *T) { return T->getClauseKind() == OMPC_shared; } }; /// This represents clause 'reduction' in the '#pragma omp ...' /// directives. 
///
/// \code
/// #pragma omp parallel reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'reduction'
/// with operator '+' and the variables 'a' and 'b'.
class OMPReductionClause final
    : public OMPVarListClause<OMPReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPReductionClause, Expr *> {
  // Tail-allocated storage layout: after the var-list come 4 arrays of
  // varlist_size() expressions each, in order — privates, LHS exprs, RHS
  // exprs, reduction ops. The accessors below slice them out in exactly
  // this order; do not reorder the accessor chain.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param ColonLoc Location of ':'.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  OMPReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N,
                     NestedNameSpecifierLoc QualifierLoc,
                     const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPReductionClause>(OMPC_reduction, StartLoc,
                                             LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPReductionClause(unsigned N)
      : OMPVarListClause<OMPReductionClause>(OMPC_reduction, SourceLocation(),
                                             SourceLocation(),
                                             SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private copy of the reduction
  /// variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates.
  // Array 1: immediately follows the var-list in tail storage.
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent LHS expression in the final
  /// reduction expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions.
  // Array 2: follows the privates.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent RHS expression in the final
  /// reduction expression performed by the reduction clause.
  /// Also, variables in these expressions are used for proper initialization of
  /// reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper destination expressions.
  // Array 3: follows the LHS expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions.
  // Array 4: follows the RHS expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization of
  /// reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }

  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }

  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }

  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }

  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }

  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }

  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }

  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }

  // Only the original variable list is exposed as children; the 4 helper
  // arrays are not visited.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  // LLVM-style RTTI discriminator.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_reduction;
  }
};

/// This represents clause 'task_reduction' in the '#pragma omp taskgroup'
/// directives.
///
/// \code
/// #pragma omp taskgroup task_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp taskgroup' has clause
/// 'task_reduction' with operator '+' and the variables 'a' and 'b'.
class OMPTaskReductionClause final
    : public OMPVarListClause<OMPTaskReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPTaskReductionClause, Expr *> {
  // Same tail-allocated layout as OMPReductionClause: var-list followed by
  // 4 arrays of varlist_size() expressions — privates, LHS exprs, RHS
  // exprs, reduction ops.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param ColonLoc Location of ':'.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  OMPTaskReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                         SourceLocation ColonLoc, SourceLocation EndLoc,
                         unsigned N, NestedNameSpecifierLoc QualifierLoc,
                         const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPTaskReductionClause>(OMPC_task_reduction, StartLoc,
                                                 LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPTaskReductionClause(unsigned N)
      : OMPVarListClause<OMPTaskReductionClause>(
            OMPC_task_reduction, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent private copy of the reduction variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates.
  // Array 1: immediately follows the var-list in tail storage.
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent LHS expression in the final reduction
  /// expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions.
  // Array 2: follows the privates.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent RHS expression in the final reduction
  /// expression performed by the reduction clause. Also, variables in these
  /// expressions are used for proper initialization of reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper destination expressions.
  // Array 3: follows the LHS expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions.
  // Array 4: follows the RHS expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization of
  /// reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPTaskReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPTaskReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }

  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }

  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }

  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }

  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }

  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }

  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }

  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }

  // Only the original variable list is exposed as children; the 4 helper
  // arrays are not visited.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  // LLVM-style RTTI discriminator.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_task_reduction;
  }
};

/// This represents clause 'in_reduction' in the '#pragma omp task' directives.
///
/// \code
/// #pragma omp task in_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'in_reduction' with
/// operator '+' and the variables 'a' and 'b'.
class OMPInReductionClause final
    : public OMPVarListClause<OMPInReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPInReductionClause, Expr *> {
  // Tail-allocated storage layout: after the var-list come 5 arrays of
  // varlist_size() expressions each, in order — privates, LHS exprs, RHS
  // exprs, reduction ops, and taskgroup descriptors (the extra array that
  // distinguishes this clause from OMPReductionClause). The accessors below
  // slice them out in exactly this order; do not reorder the accessor chain.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param ColonLoc Location of ':'.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  OMPInReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation ColonLoc, SourceLocation EndLoc,
                       unsigned N, NestedNameSpecifierLoc QualifierLoc,
                       const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPInReductionClause>(OMPC_in_reduction, StartLoc,
                                               LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPInReductionClause(unsigned N)
      : OMPVarListClause<OMPInReductionClause>(
            OMPC_in_reduction, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent private copy of the reduction variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates.
  // Array 1: immediately follows the var-list in tail storage.
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent LHS expression in the final reduction
  /// expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions.
  // Array 2: follows the privates.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the clause.
  /// These expressions represent RHS expression in the final reduction
  /// expression performed by the reduction clause. Also, variables in these
  /// expressions are used for proper initialization of reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper destination expressions.
  // Array 3: follows the LHS expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions.
  // Array 4: follows the RHS expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction taskgroup descriptors.
  void setTaskgroupDescriptors(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction taskgroup descriptors.
  // Array 5: follows the reduction ops.
  MutableArrayRef<Expr *> getTaskgroupDescriptors() {
    return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size());
  }
  ArrayRef<const Expr *> getTaskgroupDescriptors() const {
    return llvm::makeArrayRef(getReductionOps().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// LHSs of the reduction expressions.
  /// \param RHSExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list represents
  /// RHSs of the reduction expressions.
  /// Also, variables in these expressions are used for proper initialization of
  /// reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by the
  /// reduction clause.
  /// \param TaskgroupDescriptors List of helper taskgroup descriptors for
  /// corresponding items in parent taskgroup task_reduction clause.
  /// \param PreInit Statement that must be executed before entering the OpenMP
  /// region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPInReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, ArrayRef<Expr *> TaskgroupDescriptors,
         Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPInReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }

  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }

  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }

  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }

  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }

  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }

  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }

  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }

  helper_expr_const_range taskgroup_descriptors() const {
    return helper_expr_const_range(getTaskgroupDescriptors().begin(),
                                   getTaskgroupDescriptors().end());
  }

  helper_expr_range taskgroup_descriptors() {
    return helper_expr_range(getTaskgroupDescriptors().begin(),
                             getTaskgroupDescriptors().end());
  }

  // Only the original variable list is exposed as children; the 5 helper
  // arrays are not visited.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  // LLVM-style RTTI discriminator.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_in_reduction;
  }
};

/// This represents clause 'linear' in the '#pragma omp ...'
/// directives.
/// /// \code /// #pragma omp simd linear(a,b : 2) /// \endcode /// In this example directive '#pragma omp simd' has clause 'linear' /// with variables 'a', 'b' and linear step '2'. class OMPLinearClause final : public OMPVarListClause<OMPLinearClause>, public OMPClauseWithPostUpdate, private llvm::TrailingObjects<OMPLinearClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Modifier of 'linear' clause. OpenMPLinearClauseKind Modifier = OMPC_LINEAR_val; /// Location of linear modifier if any. SourceLocation ModifierLoc; /// Location of ':'. SourceLocation ColonLoc; /// Sets the linear step for clause. void setStep(Expr *Step) { *(getFinals().end()) = Step; } /// Sets the expression to calculate linear step for clause. void setCalcStep(Expr *CalcStep) { *(getFinals().end() + 1) = CalcStep; } /// Build 'linear' clause with given number of variables \a NumVars. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param NumVars Number of variables. OMPLinearClause(SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned NumVars) : OMPVarListClause<OMPLinearClause>(OMPC_linear, StartLoc, LParenLoc, EndLoc, NumVars), OMPClauseWithPostUpdate(this), Modifier(Modifier), ModifierLoc(ModifierLoc), ColonLoc(ColonLoc) {} /// Build an empty clause. /// /// \param NumVars Number of variables. explicit OMPLinearClause(unsigned NumVars) : OMPVarListClause<OMPLinearClause>(OMPC_linear, SourceLocation(), SourceLocation(), SourceLocation(), NumVars), OMPClauseWithPostUpdate(this) {} /// Gets the list of initial values for linear variables. 
/// /// There are NumVars expressions with initial values allocated after the /// varlist, they are followed by NumVars update expressions (used to update /// the linear variable's value on current iteration) and they are followed by /// NumVars final expressions (used to calculate the linear variable's /// value after the loop body). After these lists, there are 2 helper /// expressions - linear step and a helper to calculate it before the /// loop body (used when the linear step is not constant): /// /// { Vars[] /* in OMPVarListClause */; Privates[]; Inits[]; Updates[]; /// Finals[]; Step; CalcStep; } MutableArrayRef<Expr *> getPrivates() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivates() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } MutableArrayRef<Expr *> getInits() { return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size()); } ArrayRef<const Expr *> getInits() const { return llvm::makeArrayRef(getPrivates().end(), varlist_size()); } /// Sets the list of update expressions for linear variables. MutableArrayRef<Expr *> getUpdates() { return MutableArrayRef<Expr *>(getInits().end(), varlist_size()); } ArrayRef<const Expr *> getUpdates() const { return llvm::makeArrayRef(getInits().end(), varlist_size()); } /// Sets the list of final update expressions for linear variables. MutableArrayRef<Expr *> getFinals() { return MutableArrayRef<Expr *>(getUpdates().end(), varlist_size()); } ArrayRef<const Expr *> getFinals() const { return llvm::makeArrayRef(getUpdates().end(), varlist_size()); } /// Sets the list of the copies of original linear variables. /// \param PL List of expressions. void setPrivates(ArrayRef<Expr *> PL); /// Sets the list of the initial values for linear variables. /// \param IL List of expressions. void setInits(ArrayRef<Expr *> IL); public: /// Creates clause with a list of variables \a VL and a linear step /// \a Step. /// /// \param C AST Context. 
/// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param Modifier Modifier of 'linear' clause. /// \param ModifierLoc Modifier location. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param PL List of private copies of original variables. /// \param IL List of initial values for the variables. /// \param Step Linear step. /// \param CalcStep Calculation of the linear step. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. /// \param PostUpdate Expression that must be executed after exit from the /// OpenMP region with this clause. static OMPLinearClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep, Stmt *PreInit, Expr *PostUpdate); /// Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param NumVars Number of variables. static OMPLinearClause *CreateEmpty(const ASTContext &C, unsigned NumVars); /// Set modifier. void setModifier(OpenMPLinearClauseKind Kind) { Modifier = Kind; } /// Return modifier. OpenMPLinearClauseKind getModifier() const { return Modifier; } /// Set modifier location. void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; } /// Return modifier location. SourceLocation getModifierLoc() const { return ModifierLoc; } /// Sets the location of ':'. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } /// Returns the location of ':'. SourceLocation getColonLoc() const { return ColonLoc; } /// Returns linear step. Expr *getStep() { return *(getFinals().end()); } /// Returns linear step. 
const Expr *getStep() const { return *(getFinals().end()); }

/// Returns expression to calculate linear step (slot after the step).
Expr *getCalcStep() { return *(getFinals().end() + 1); }

/// Returns expression to calculate linear step.
const Expr *getCalcStep() const { return *(getFinals().end() + 1); }

/// Sets the list of update expressions for linear variables.
/// \param UL List of expressions.
void setUpdates(ArrayRef<Expr *> UL);

/// Sets the list of final update expressions for linear variables.
/// \param FL List of expressions.
void setFinals(ArrayRef<Expr *> FL);

using privates_iterator = MutableArrayRef<Expr *>::iterator;
using privates_const_iterator = ArrayRef<const Expr *>::iterator;
using privates_range = llvm::iterator_range<privates_iterator>;
using privates_const_range = llvm::iterator_range<privates_const_iterator>;

privates_range privates() {
  return privates_range(getPrivates().begin(), getPrivates().end());
}
privates_const_range privates() const {
  return privates_const_range(getPrivates().begin(), getPrivates().end());
}

using inits_iterator = MutableArrayRef<Expr *>::iterator;
using inits_const_iterator = ArrayRef<const Expr *>::iterator;
using inits_range = llvm::iterator_range<inits_iterator>;
using inits_const_range = llvm::iterator_range<inits_const_iterator>;

inits_range inits() {
  return inits_range(getInits().begin(), getInits().end());
}
inits_const_range inits() const {
  return inits_const_range(getInits().begin(), getInits().end());
}

using updates_iterator = MutableArrayRef<Expr *>::iterator;
using updates_const_iterator = ArrayRef<const Expr *>::iterator;
using updates_range = llvm::iterator_range<updates_iterator>;
using updates_const_range = llvm::iterator_range<updates_const_iterator>;

updates_range updates() {
  return updates_range(getUpdates().begin(), getUpdates().end());
}
updates_const_range updates() const {
  return updates_const_range(getUpdates().begin(), getUpdates().end());
}

using finals_iterator = MutableArrayRef<Expr *>::iterator;
using finals_const_iterator = ArrayRef<const Expr *>::iterator;
using finals_range = llvm::iterator_range<finals_iterator>;
using finals_const_range = llvm::iterator_range<finals_const_iterator>;

finals_range finals() {
  return finals_range(getFinals().begin(), getFinals().end());
}
finals_const_range finals() const {
  return finals_const_range(getFinals().begin(), getFinals().end());
}

child_range children() {
  return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                     reinterpret_cast<Stmt **>(varlist_end()));
}

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == OMPC_linear;
}
};

/// This represents clause 'aligned' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd aligned(a,b : 8)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'aligned'
/// with variables 'a', 'b' and alignment '8'.
class OMPAlignedClause final
    : public OMPVarListClause<OMPAlignedClause>,
      private llvm::TrailingObjects<OMPAlignedClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Sets the alignment for clause. The alignment expression occupies the
  /// single tail-allocated slot directly past the variable list.
  void setAlignment(Expr *A) { *varlist_end() = A; }

  /// Build 'aligned' clause with given number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of variables.
  OMPAlignedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation ColonLoc, SourceLocation EndLoc,
                   unsigned NumVars)
      : OMPVarListClause<OMPAlignedClause>(OMPC_aligned, StartLoc, LParenLoc,
                                           EndLoc, NumVars),
        ColonLoc(ColonLoc) {}

  /// Build an empty clause.
  ///
  /// \param NumVars Number of variables.
explicit OMPAlignedClause(unsigned NumVars)
    : OMPVarListClause<OMPAlignedClause>(OMPC_aligned, SourceLocation(),
                                         SourceLocation(), SourceLocation(),
                                         NumVars) {}

public:
/// Creates clause with a list of variables \a VL and alignment \a A.
///
/// \param C AST Context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param A Alignment.
static OMPAlignedClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                SourceLocation LParenLoc,
                                SourceLocation ColonLoc, SourceLocation EndLoc,
                                ArrayRef<Expr *> VL, Expr *A);

/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param NumVars Number of variables.
static OMPAlignedClause *CreateEmpty(const ASTContext &C, unsigned NumVars);

/// Sets the location of ':'.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

/// Returns the location of ':'.
SourceLocation getColonLoc() const { return ColonLoc; }

/// Returns alignment (the tail slot just past the variable list).
Expr *getAlignment() { return *varlist_end(); }

/// Returns alignment.
const Expr *getAlignment() const { return *varlist_end(); }

child_range children() {
  return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                     reinterpret_cast<Stmt **>(varlist_end()));
}

static bool classof(const OMPClause *T) {
  return T->getClauseKind() == OMPC_aligned;
}
};

/// This represents clause 'copyin' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel copyin(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'copyin'
/// with the variables 'a' and 'b'.
class OMPCopyinClause final
    : public OMPVarListClause<OMPCopyinClause>,
      private llvm::TrailingObjects<OMPCopyinClause, Expr *> {
  // Class has 3 additional tail allocated arrays:
  // 1. List of helper expressions for proper generation of assignment
  // operation required for copyin clause. This list represents sources.
  // 2. List of helper expressions for proper generation of assignment
  // operation required for copyin clause. This list represents destinations.
  // 3. List of helper expressions that represents assignment operation:
  // \code
  // DstExprs = SrcExprs;
  // \endcode
  // Required for proper codegen of propagation of master's thread values of
  // threadprivate variables to local instances of that variables in other
  // implicit threads.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPCopyinClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPCopyinClause>(OMPC_copyin, StartLoc, LParenLoc,
                                          EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPCopyinClause(unsigned N)
      : OMPVarListClause<OMPCopyinClause>(OMPC_copyin, SourceLocation(),
                                          SourceLocation(), SourceLocation(),
                                          N) {}

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent source expression in the final
  /// assignment statement performed by the copyin clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// Get the list of helper source expressions (tail array #1, directly
  /// after the variable list).
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent destination expression in the final
  /// assignment statement performed by the copyin clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// Get the list of helper destination expressions (tail array #2).
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign source helper expressions to destination helper expressions
  /// correspondingly.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// Get the list of helper assignment expressions (tail array #3).
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for copyin clause. This list represents
  /// sources.
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for copyin clause. This list represents
  /// destinations.
  /// \param AssignmentOps List of helper expressions that represents
  /// assignment operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of propagation of master's thread values of
  /// threadprivate variables to local instances of that variables in other
  /// implicit threads.
  static OMPCopyinClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPCopyinClause *CreateEmpty(const ASTContext &C, unsigned N);

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }
  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }
  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }
  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }
  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }
  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_copyin;
  }
};

/// This represents clause 'copyprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp single copyprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp single' has clause 'copyprivate'
/// with the variables 'a' and 'b'.
class OMPCopyprivateClause final
    : public OMPVarListClause<OMPCopyprivateClause>,
      private llvm::TrailingObjects<OMPCopyprivateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPCopyprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPCopyprivateClause>(OMPC_copyprivate, StartLoc,
                                               LParenLoc, EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPCopyprivateClause(unsigned N)
      : OMPVarListClause<OMPCopyprivateClause>(
            OMPC_copyprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N) {}

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent source expression in the final
  /// assignment statement performed by the copyprivate clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// Get the list of helper source expressions (tail array directly after
  /// the variable list).
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent destination expression in the final
  /// assignment statement performed by the copyprivate clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign source helper expressions to destination helper expressions
  /// correspondingly.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// Get the list of helper assignment expressions.
  MutableArrayRef<Expr *> getAssignmentOps() {
    return MutableArrayRef<Expr *>(getDestinationExprs().end(),
                                   varlist_size());
  }
  ArrayRef<const Expr *> getAssignmentOps() const {
    return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  /// \param SrcExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list
  /// represents sources.
  /// \param DstExprs List of helper expressions for proper generation of
  /// assignment operation required for copyprivate clause. This list
  /// represents destinations.
  /// \param AssignmentOps List of helper expressions that represents
  /// assignment operation:
  /// \code
  /// DstExprs = SrcExprs;
  /// \endcode
  /// Required for proper codegen of final assignment performed by the
  /// copyprivate clause.
  static OMPCopyprivateClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
         ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPCopyprivateClause *CreateEmpty(const ASTContext &C, unsigned N);

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range source_exprs() const {
    return helper_expr_const_range(getSourceExprs().begin(),
                                   getSourceExprs().end());
  }
  helper_expr_range source_exprs() {
    return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
  }
  helper_expr_const_range destination_exprs() const {
    return helper_expr_const_range(getDestinationExprs().begin(),
                                   getDestinationExprs().end());
  }
  helper_expr_range destination_exprs() {
    return helper_expr_range(getDestinationExprs().begin(),
                             getDestinationExprs().end());
  }
  helper_expr_const_range assignment_ops() const {
    return helper_expr_const_range(getAssignmentOps().begin(),
                                   getAssignmentOps().end());
  }
  helper_expr_range assignment_ops() {
    return helper_expr_range(getAssignmentOps().begin(),
                             getAssignmentOps().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_copyprivate;
  }
};

/// This represents implicit clause 'flush' for the '#pragma omp flush'
/// directive.
/// This clause does not exist by itself, it can be only as a part of 'omp
/// flush' directive. This clause is introduced to keep the original structure
/// of \a OMPExecutableDirective class and its derivatives and to use the
/// existing infrastructure of clauses with the list of variables.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has implicit clause 'flush'
/// with the variables 'a' and 'b'.
class OMPFlushClause final
    : public OMPVarListClause<OMPFlushClause>,
      private llvm::TrailingObjects<OMPFlushClause, Expr *> {
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPFlushClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                 SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPFlushClause>(OMPC_flush, StartLoc, LParenLoc,
                                         EndLoc, N) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPFlushClause(unsigned N)
      : OMPVarListClause<OMPFlushClause>(OMPC_flush, SourceLocation(),
                                         SourceLocation(), SourceLocation(),
                                         N) {}

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPFlushClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                SourceLocation LParenLoc, SourceLocation EndLoc,
                                ArrayRef<Expr *> VL);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPFlushClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_flush;
  }
};

/// This represents implicit clause 'depend' for the '#pragma omp task'
/// directive.
///
/// \code
/// #pragma omp task depend(in:a,b)
/// \endcode
/// In this example directive '#pragma omp task' with clause 'depend' with the
/// variables 'a' and 'b' with dependency 'in'.
class OMPDependClause final
    : public OMPVarListClause<OMPDependClause>,
      private llvm::TrailingObjects<OMPDependClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Dependency type (one of in, out, inout).
  OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;

  /// Dependency type location.
  SourceLocation DepLoc;

  /// Colon location.
  SourceLocation ColonLoc;

  /// Number of loops, associated with the depend clause.
  unsigned NumLoops = 0;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  /// \param NumLoops Number of loops that is associated with this depend
  /// clause.
  OMPDependClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N, unsigned NumLoops)
      : OMPVarListClause<OMPDependClause>(OMPC_depend, StartLoc, LParenLoc,
                                          EndLoc, N),
        NumLoops(NumLoops) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  /// \param NumLoops Number of loops that is associated with this depend
  /// clause.
  explicit OMPDependClause(unsigned N, unsigned NumLoops)
      : OMPVarListClause<OMPDependClause>(OMPC_depend, SourceLocation(),
                                          SourceLocation(), SourceLocation(),
                                          N),
        NumLoops(NumLoops) {}

  /// Set dependency kind.
  void setDependencyKind(OpenMPDependClauseKind K) { DepKind = K; }

  /// Set dependency kind and its location.
  void setDependencyLoc(SourceLocation Loc) { DepLoc = Loc; }

  /// Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param DepKind Dependency type.
  /// \param DepLoc Location of the dependency type.
  /// \param ColonLoc Colon location.
  /// \param VL List of references to the variables.
  /// \param NumLoops Number of loops that is associated with this depend
  /// clause.
  static OMPDependClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc,
                                 OpenMPDependClauseKind DepKind,
                                 SourceLocation DepLoc, SourceLocation ColonLoc,
                                 ArrayRef<Expr *> VL, unsigned NumLoops);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  /// \param NumLoops Number of loops that is associated with this depend
  /// clause.
  static OMPDependClause *CreateEmpty(const ASTContext &C, unsigned N,
                                      unsigned NumLoops);

  /// Get dependency type.
  OpenMPDependClauseKind getDependencyKind() const { return DepKind; }

  /// Get dependency type location.
  SourceLocation getDependencyLoc() const { return DepLoc; }

  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Get number of loops associated with the clause.
  unsigned getNumLoops() const { return NumLoops; }

  /// Set the loop data for the depend clauses with 'sink|source' kind of
  /// dependency.
  void setLoopData(unsigned NumLoop, Expr *Cnt);

  /// Get the loop data.
  Expr *getLoopData(unsigned NumLoop);
  const Expr *getLoopData(unsigned NumLoop) const;

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_depend;
  }
};

/// This represents 'device' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp target device(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'device'
/// with single expression 'a'.
class OMPDeviceClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Device number.
  Stmt *Device = nullptr;

  /// Set the device number.
  ///
  /// \param E Device number.
  void setDevice(Expr *E) { Device = E; }

public:
  /// Build 'device' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param HelperE Helper expression stored as a pre-init statement.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDeviceClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion,
                  SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc)
      : OMPClause(OMPC_device, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), Device(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }

  /// Build an empty clause.
  OMPDeviceClause()
      : OMPClause(OMPC_device, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return device number.
  Expr *getDevice() { return cast<Expr>(Device); }

  /// Return device number.
  Expr *getDevice() const { return cast<Expr>(Device); }

  child_range children() { return child_range(&Device, &Device + 1); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_device;
  }
};

/// This represents 'threads' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered threads
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'threads' clause.
class OMPThreadsClause : public OMPClause {
public:
  /// Build 'threads' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_threads, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPThreadsClause()
      : OMPClause(OMPC_threads, SourceLocation(), SourceLocation()) {}

  // 'threads' carries no expressions, so there are no children to visit.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_threads;
  }
};

/// This represents 'simd' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered simd
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'simd' clause.
class OMPSIMDClause : public OMPClause {
public:
  /// Build 'simd' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_simd, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPSIMDClause() : OMPClause(OMPC_simd, SourceLocation(), SourceLocation()) {}

  // 'simd' carries no expressions, so there are no children to visit.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_simd;
  }
};

/// Struct that defines common infrastructure to handle mappable
/// expressions used in OpenMP clauses.
class OMPClauseMappableExprCommon {
public:
  /// Class that represents a component of a mappable expression. E.g.
  /// for an expression S.a, the first component is a declaration reference
  /// expression associated with 'S' and the second is a member expression
  /// associated with the field declaration 'a'. If the expression is an array
  /// subscript it may not have any associated declaration. In that case the
  /// associated declaration is set to nullptr.
  class MappableComponent {
    /// Expression associated with the component.
    Expr *AssociatedExpression = nullptr;

    /// Declaration associated with the declaration. If the component does
    /// not have a declaration (e.g. array subscripts or section), this is set
    /// to nullptr.
    ValueDecl *AssociatedDeclaration = nullptr;

  public:
    explicit MappableComponent() = default;

    // The declaration is canonicalized on construction so that components
    // from different expressions can be grouped by the same base declaration.
    explicit MappableComponent(Expr *AssociatedExpression,
                               ValueDecl *AssociatedDeclaration)
        : AssociatedExpression(AssociatedExpression),
          AssociatedDeclaration(
              AssociatedDeclaration
                  ? cast<ValueDecl>(AssociatedDeclaration->getCanonicalDecl())
                  : nullptr) {}

    Expr *getAssociatedExpression() const { return AssociatedExpression; }

    ValueDecl *getAssociatedDeclaration() const {
      return AssociatedDeclaration;
    }
  };

  // List of components of an expression. This first one is the whole
  // expression and the last one is the base expression.
  using MappableExprComponentList = SmallVector<MappableComponent, 8>;
  using MappableExprComponentListRef = ArrayRef<MappableComponent>;

  // List of all component lists associated to the same base declaration.
  // E.g. if both 'S.a' and 'S.b' are a mappable expressions, each will have
  // their component list but the same base declaration 'S'.
  using MappableExprComponentLists = SmallVector<MappableExprComponentList, 8>;
  using MappableExprComponentListsRef = ArrayRef<MappableExprComponentList>;

protected:
  // Return the total number of elements in a list of component lists.
  static unsigned
  getComponentsTotalNumber(MappableExprComponentListsRef ComponentLists);

  // Return the total number of elements in a list of declarations. All
  // declarations are expected to be canonical.
  static unsigned
  getUniqueDeclarationsTotalNumber(ArrayRef<const ValueDecl *> Declarations);
};

/// This represents clauses with a list of expressions that are mappable.
/// Examples of these clauses are 'map' in
/// '#pragma omp target [enter|exit] [data]...' directives, and 'to' and 'from'
/// in '#pragma omp target update...' directives.
template <class T>
class OMPMappableExprListClause : public OMPVarListClause<T>,
                                  public OMPClauseMappableExprCommon {
  // The AST reader needs access to the protected setters when deserializing.
  friend class OMPClauseReader;

  /// Number of unique declarations in this clause.
  unsigned NumUniqueDeclarations;

  /// Number of component lists in this clause.
  unsigned NumComponentLists;

  /// Total number of components in this clause.
  unsigned NumComponents;

protected:
  /// Build a clause for \a NumUniqueDeclarations declarations, \a
  /// NumComponentLists total component lists, and \a NumComponents total
  /// components.
  ///
  /// \param K Kind of the clause.
  /// \param StartLoc Starting location of the clause (the clause keyword).
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of expressions listed in the clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause - one
  /// list for each expression in the clause.
  /// \param NumComponents Total number of expression components in the clause.
  OMPMappableExprListClause(OpenMPClauseKind K, SourceLocation StartLoc,
                            SourceLocation LParenLoc, SourceLocation EndLoc,
                            unsigned NumVars, unsigned NumUniqueDeclarations,
                            unsigned NumComponentLists, unsigned NumComponents)
      : OMPVarListClause<T>(K, StartLoc, LParenLoc, EndLoc, NumVars),
        NumUniqueDeclarations(NumUniqueDeclarations),
        NumComponentLists(NumComponentLists), NumComponents(NumComponents) {}

  // NOTE: the trailing storage belongs to the derived class T (CRTP), so all
  // accessors below cast 'this' to T before reaching the trailing objects.

  /// Get the unique declarations that are in the trailing objects of the
  /// class.
  MutableArrayRef<ValueDecl *> getUniqueDeclsRef() {
    return MutableArrayRef<ValueDecl *>(
        static_cast<T *>(this)->template getTrailingObjects<ValueDecl *>(),
        NumUniqueDeclarations);
  }

  /// Get the unique declarations that are in the trailing objects of the
  /// class.
  ArrayRef<ValueDecl *> getUniqueDeclsRef() const {
    return ArrayRef<ValueDecl *>(
        static_cast<const T *>(this)
            ->template getTrailingObjects<ValueDecl *>(),
        NumUniqueDeclarations);
  }

  /// Set the unique declarations that are in the trailing objects of the
  /// class.
  void setUniqueDecls(ArrayRef<ValueDecl *> UDs) {
    assert(UDs.size() == NumUniqueDeclarations &&
           "Unexpected amount of unique declarations.");
    std::copy(UDs.begin(), UDs.end(), getUniqueDeclsRef().begin());
  }

  /// Get the number of lists per declaration that are in the trailing
  /// objects of the class.
  MutableArrayRef<unsigned> getDeclNumListsRef() {
    return MutableArrayRef<unsigned>(
        static_cast<T *>(this)->template getTrailingObjects<unsigned>(),
        NumUniqueDeclarations);
  }

  /// Get the number of lists per declaration that are in the trailing
  /// objects of the class.
  ArrayRef<unsigned> getDeclNumListsRef() const {
    return ArrayRef<unsigned>(
        static_cast<const T *>(this)->template getTrailingObjects<unsigned>(),
        NumUniqueDeclarations);
  }

  /// Set the number of lists per declaration that are in the trailing
  /// objects of the class.
  void setDeclNumLists(ArrayRef<unsigned> DNLs) {
    assert(DNLs.size() == NumUniqueDeclarations &&
           "Unexpected amount of list numbers.");
    std::copy(DNLs.begin(), DNLs.end(), getDeclNumListsRef().begin());
  }

  /// Get the cumulative component lists sizes that are in the trailing
  /// objects of the class. They are appended after the number of lists.
  MutableArrayRef<unsigned> getComponentListSizesRef() {
    // Both the per-declaration list counts and the cumulative sizes share one
    // 'unsigned' trailing array; the sizes start after the first
    // NumUniqueDeclarations entries.
    return MutableArrayRef<unsigned>(
        static_cast<T *>(this)->template getTrailingObjects<unsigned>() +
            NumUniqueDeclarations,
        NumComponentLists);
  }

  /// Get the cumulative component lists sizes that are in the trailing
  /// objects of the class. They are appended after the number of lists.
  ArrayRef<unsigned> getComponentListSizesRef() const {
    return ArrayRef<unsigned>(
        static_cast<const T *>(this)->template getTrailingObjects<unsigned>() +
            NumUniqueDeclarations,
        NumComponentLists);
  }

  /// Set the cumulative component lists sizes that are in the trailing
  /// objects of the class.
  void setComponentListSizes(ArrayRef<unsigned> CLSs) {
    assert(CLSs.size() == NumComponentLists &&
           "Unexpected amount of component lists.");
    std::copy(CLSs.begin(), CLSs.end(), getComponentListSizesRef().begin());
  }

  /// Get the components that are in the trailing objects of the class.
  MutableArrayRef<MappableComponent> getComponentsRef() {
    return MutableArrayRef<MappableComponent>(
        static_cast<T *>(this)
            ->template getTrailingObjects<MappableComponent>(),
        NumComponents);
  }

  /// Get the components that are in the trailing objects of the class.
  ArrayRef<MappableComponent> getComponentsRef() const {
    return ArrayRef<MappableComponent>(
        static_cast<const T *>(this)
            ->template getTrailingObjects<MappableComponent>(),
        NumComponents);
  }

  /// Set the components that are in the trailing objects of the class.
  /// This requires the list sizes so that it can also fill the original
  /// expressions, which are the first component of each list.
  void setComponents(ArrayRef<MappableComponent> Components,
                     ArrayRef<unsigned> CLSs) {
    assert(Components.size() == NumComponents &&
           "Unexpected amount of component lists.");
    assert(CLSs.size() == NumComponentLists &&
           "Unexpected amount of list sizes.");
    std::copy(Components.begin(), Components.end(), getComponentsRef().begin());
  }

  /// Fill the clause information from the list of declarations and
  /// associated component lists.
  void setClauseInfo(ArrayRef<ValueDecl *> Declarations,
                     MappableExprComponentListsRef ComponentLists) {
    // Perform some checks to make sure the data sizes are consistent with the
    // information available when the clause was created.
    assert(getUniqueDeclarationsTotalNumber(Declarations) ==
               NumUniqueDeclarations &&
           "Unexpected number of mappable expression info entries!");
    assert(getComponentsTotalNumber(ComponentLists) == NumComponents &&
           "Unexpected total number of components!");
    assert(Declarations.size() == ComponentLists.size() &&
           "Declaration and component lists size is not consistent!");
    assert(Declarations.size() == NumComponentLists &&
           "Unexpected declaration and component lists size!");

    // Organize the components by declaration and retrieve the original
    // expression. Original expressions are always the first component of the
    // mappable component list.
    llvm::MapVector<ValueDecl *, SmallVector<MappableExprComponentListRef, 8>>
        ComponentListMap;
    {
      auto CI = ComponentLists.begin();
      for (auto DI = Declarations.begin(), DE = Declarations.end(); DI != DE;
           ++DI, ++CI) {
        assert(!CI->empty() && "Invalid component list!");
        ComponentListMap[*DI].push_back(*CI);
      }
    }

    // Iterators of the target storage.
    auto UniqueDeclarations = getUniqueDeclsRef();
    auto UDI = UniqueDeclarations.begin();

    auto DeclNumLists = getDeclNumListsRef();
    auto DNLI = DeclNumLists.begin();

    auto ComponentListSizes = getComponentListSizesRef();
    auto CLSI = ComponentListSizes.begin();

    auto Components = getComponentsRef();
    auto CI = Components.begin();

    // Variable to compute the accumulation of the number of components.
    unsigned PrevSize = 0u;

    // Scan all the declarations and associated component lists.
    for (auto &M : ComponentListMap) {
      // The declaration.
      auto *D = M.first;
      // The component lists.
      auto CL = M.second;

      // Initialize the entry.
      *UDI = D;
      ++UDI;

      *DNLI = CL.size();
      ++DNLI;

      // Obtain the cumulative sizes and concatenate all the components in the
      // reserved storage.
      for (auto C : CL) {
        // Accumulate with the previous size.
        PrevSize += C.size();

        // Save the size.
        *CLSI = PrevSize;
        ++CLSI;

        // Append components after the current components iterator.
        CI = std::copy(C.begin(), C.end(), CI);
      }
    }
  }

public:
  /// Return the number of unique base declarations in this clause.
  unsigned getUniqueDeclarationsNum() const { return NumUniqueDeclarations; }

  /// Return the number of lists derived from the clause expressions.
  unsigned getTotalComponentListNum() const { return NumComponentLists; }

  /// Return the total number of components in all lists derived from the
  /// clause.
  unsigned getTotalComponentsNum() const { return NumComponents; }

  /// Iterator that browses the components by lists. It also allows
  /// browsing components of a single declaration.
  class const_component_lists_iterator
      : public llvm::iterator_adaptor_base<
            const_component_lists_iterator,
            MappableExprComponentListRef::const_iterator,
            std::forward_iterator_tag, MappableComponent, ptrdiff_t,
            MappableComponent, MappableComponent> {
    // The declaration the iterator currently refers to.
    ArrayRef<ValueDecl *>::iterator DeclCur;

    // The list number associated with the current declaration.
    ArrayRef<unsigned>::iterator NumListsCur;

    // Remaining lists for the current declaration.
    unsigned RemainingLists = 0;

    // The cumulative size of the previous list, or zero if there is no previous
    // list.
    unsigned PrevListSize = 0;

    // The cumulative sizes of the current list - it will delimit the remaining
    // range of interest.
    ArrayRef<unsigned>::const_iterator ListSizeCur;
    ArrayRef<unsigned>::const_iterator ListSizeEnd;

    // Iterator to the end of the components storage.
    MappableExprComponentListRef::const_iterator End;

  public:
    /// Construct an iterator that scans all lists.
    explicit const_component_lists_iterator(
        ArrayRef<ValueDecl *> UniqueDecls, ArrayRef<unsigned> DeclsListNum,
        ArrayRef<unsigned> CumulativeListSizes,
        MappableExprComponentListRef Components)
        : const_component_lists_iterator::iterator_adaptor_base(
              Components.begin()),
          DeclCur(UniqueDecls.begin()), NumListsCur(DeclsListNum.begin()),
          ListSizeCur(CumulativeListSizes.begin()),
          ListSizeEnd(CumulativeListSizes.end()), End(Components.end()) {
      assert(UniqueDecls.size() == DeclsListNum.size() &&
             "Inconsistent number of declarations and list sizes!");
      if (!DeclsListNum.empty())
        RemainingLists = *NumListsCur;
    }

    /// Construct an iterator that scan lists for a given declaration \a
    /// Declaration.
    explicit const_component_lists_iterator(
        const ValueDecl *Declaration, ArrayRef<ValueDecl *> UniqueDecls,
        ArrayRef<unsigned> DeclsListNum, ArrayRef<unsigned> CumulativeListSizes,
        MappableExprComponentListRef Components)
        : const_component_lists_iterator(UniqueDecls, DeclsListNum,
                                         CumulativeListSizes, Components) {
      // Look for the desired declaration. While we are looking for it, we
      // update the state so that we know the component where a given list
      // starts.
      for (; DeclCur != UniqueDecls.end(); ++DeclCur, ++NumListsCur) {
        if (*DeclCur == Declaration)
          break;

        assert(*NumListsCur > 0 && "No lists associated with declaration??");

        // Skip the lists associated with the current declaration, but save the
        // last list size that was skipped.
        std::advance(ListSizeCur, *NumListsCur - 1);
        PrevListSize = *ListSizeCur;
        ++ListSizeCur;
      }

      // If we didn't find any declaration, advance the iterator to after the
      // last component and set remaining lists to zero.
      if (ListSizeCur == CumulativeListSizes.end()) {
        this->I = End;
        RemainingLists = 0u;
        return;
      }

      // Set the remaining lists with the total number of lists of the current
      // declaration.
      RemainingLists = *NumListsCur;

      // Adjust the list size end iterator to the end of the relevant range.
      ListSizeEnd = ListSizeCur;
      std::advance(ListSizeEnd, RemainingLists);

      // Given that the list sizes are cumulative, the index of the component
      // that start the list is the size of the previous list.
      std::advance(this->I, PrevListSize);
    }

    // Return the array with the current list. The sizes are cumulative, so the
    // array size is the difference between the current size and previous one.
    std::pair<const ValueDecl *, MappableExprComponentListRef>
    operator*() const {
      assert(ListSizeCur != ListSizeEnd && "Invalid iterator!");
      return std::make_pair(
          *DeclCur,
          MappableExprComponentListRef(&*this->I, *ListSizeCur - PrevListSize));
    }
    std::pair<const ValueDecl *, MappableExprComponentListRef>
    operator->() const {
      return **this;
    }

    // Skip the components of the current list.
    const_component_lists_iterator &operator++() {
      assert(ListSizeCur != ListSizeEnd && RemainingLists &&
             "Invalid iterator!");

      // If we don't have more lists just skip all the components. Otherwise,
      // advance the iterator by the number of components in the current list.
      if (std::next(ListSizeCur) == ListSizeEnd) {
        this->I = End;
        RemainingLists = 0;
      } else {
        std::advance(this->I, *ListSizeCur - PrevListSize);
        PrevListSize = *ListSizeCur;

        // We are done with a declaration, move to the next one.
        if (!(--RemainingLists)) {
          ++DeclCur;
          ++NumListsCur;
          RemainingLists = *NumListsCur;
          assert(RemainingLists && "No lists in the following declaration??");
        }
      }
      ++ListSizeCur;
      return *this;
    }
  };

  using const_component_lists_range =
      llvm::iterator_range<const_component_lists_iterator>;

  /// Iterators for all component lists.
  const_component_lists_iterator component_lists_begin() const {
    return const_component_lists_iterator(
        getUniqueDeclsRef(), getDeclNumListsRef(), getComponentListSizesRef(),
        getComponentsRef());
  }
  const_component_lists_iterator component_lists_end() const {
    // The end iterator is an empty-range iterator anchored at the end of the
    // components storage.
    return const_component_lists_iterator(
        ArrayRef<ValueDecl *>(), ArrayRef<unsigned>(), ArrayRef<unsigned>(),
        MappableExprComponentListRef(getComponentsRef().end(),
                                     getComponentsRef().end()));
  }
  const_component_lists_range component_lists() const {
    return {component_lists_begin(), component_lists_end()};
  }

  /// Iterators for component lists associated with the provided
  /// declaration.
  const_component_lists_iterator
  decl_component_lists_begin(const ValueDecl *VD) const {
    return const_component_lists_iterator(
        VD, getUniqueDeclsRef(), getDeclNumListsRef(),
        getComponentListSizesRef(), getComponentsRef());
  }
  const_component_lists_iterator decl_component_lists_end() const {
    return component_lists_end();
  }
  const_component_lists_range decl_component_lists(const ValueDecl *VD) const {
    return {decl_component_lists_begin(VD), decl_component_lists_end()};
  }

  /// Iterators to access all the declarations, number of lists, list sizes, and
  /// components.
  using const_all_decls_iterator = ArrayRef<ValueDecl *>::iterator;
  using const_all_decls_range = llvm::iterator_range<const_all_decls_iterator>;

  const_all_decls_range all_decls() const {
    auto A = getUniqueDeclsRef();
    return const_all_decls_range(A.begin(), A.end());
  }

  using const_all_num_lists_iterator = ArrayRef<unsigned>::iterator;
  using const_all_num_lists_range =
      llvm::iterator_range<const_all_num_lists_iterator>;

  const_all_num_lists_range all_num_lists() const {
    auto A = getDeclNumListsRef();
    return const_all_num_lists_range(A.begin(), A.end());
  }

  using const_all_lists_sizes_iterator = ArrayRef<unsigned>::iterator;
  using const_all_lists_sizes_range =
      llvm::iterator_range<const_all_lists_sizes_iterator>;

  const_all_lists_sizes_range all_lists_sizes() const {
    auto A = getComponentListSizesRef();
    return const_all_lists_sizes_range(A.begin(), A.end());
  }

  using const_all_components_iterator = ArrayRef<MappableComponent>::iterator;
  using const_all_components_range =
      llvm::iterator_range<const_all_components_iterator>;

  const_all_components_range all_components() const {
    auto A = getComponentsRef();
    return const_all_components_range(A.begin(), A.end());
  }
};

/// This represents clause 'map' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target map(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'map'
/// with the variables 'a' and 'b'.
class OMPMapClause final : public OMPMappableExprListClause<OMPMapClause>,
                           private llvm::TrailingObjects<
                               OMPMapClause, Expr *, ValueDecl *, unsigned,
                               OMPClauseMappableExprCommon::MappableComponent> {
  // The AST reader needs access to the private setters when deserializing.
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  SourceLocation getColonLoc() const { return ColonLoc; }

  // The variable list is stored as trailing Expr*; expose it to the generic
  // Stmt child iteration machinery as a range of Stmt*.
  child_range children() {
    return child_range(
        reinterpret_cast<Stmt **>(varlist_begin()),
        reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_map;
  }
};

/// This represents 'num_teams' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams num_teams(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'num_teams'
/// with single expression 'n'.
class OMPNumTeamsClause : public OMPClause, public OMPClauseWithPreInit {
  // The AST reader needs access to the private setter when deserializing.
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// NumTeams number.
  Stmt *NumTeams = nullptr;

  /// Set the NumTeams number.
  ///
  /// \param E NumTeams number.
  void setNumTeams(Expr *E) { NumTeams = E; }

public:
  /// Build 'num_teams' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param HelperE Helper Expression associated with this clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumTeamsClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion,
                    SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(OMPC_num_teams, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), NumTeams(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }

  /// Build an empty clause.
  OMPNumTeamsClause()
      : OMPClause(OMPC_num_teams, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return NumTeams number.
  Expr *getNumTeams() { return cast<Expr>(NumTeams); }

  /// Return NumTeams number.
  Expr *getNumTeams() const { return cast<Expr>(NumTeams); }

  child_range children() { return child_range(&NumTeams, &NumTeams + 1); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_num_teams;
  }
};

/// This represents 'thread_limit' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp teams thread_limit(n)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'thread_limit'
/// with single expression 'n'.
class OMPThreadLimitClause : public OMPClause, public OMPClauseWithPreInit {
  // The AST reader needs access to the private setter when deserializing.
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// ThreadLimit number.
  Stmt *ThreadLimit = nullptr;

  /// Set the ThreadLimit number.
  ///
  /// \param E ThreadLimit number.
  void setThreadLimit(Expr *E) { ThreadLimit = E; }

public:
  /// Build 'thread_limit' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param HelperE Helper Expression associated with this clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPThreadLimitClause(Expr *E, Stmt *HelperE,
                       OpenMPDirectiveKind CaptureRegion,
                       SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc)
      : OMPClause(OMPC_thread_limit, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), ThreadLimit(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }

  /// Build an empty clause.
  OMPThreadLimitClause()
      : OMPClause(OMPC_thread_limit, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return ThreadLimit number.
  Expr *getThreadLimit() { return cast<Expr>(ThreadLimit); }

  /// Return ThreadLimit number.
  Expr *getThreadLimit() const { return cast<Expr>(ThreadLimit); }

  child_range children() {
    return child_range(&ThreadLimit, &ThreadLimit + 1);
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_thread_limit;
  }
};

/// This represents 'priority' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp task priority(n)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'priority' with
/// single expression 'n'.
class OMPPriorityClause : public OMPClause {
  // The AST reader needs access to the private setter when deserializing.
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Priority number.
  Stmt *Priority = nullptr;

  /// Set the Priority number.
  ///
  /// \param E Priority number.
  void setPriority(Expr *E) { Priority = E; }

public:
  /// Build 'priority' clause.
  ///
  /// \param E Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPPriorityClause(Expr *E, SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(OMPC_priority, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Priority(E) {}

  /// Build an empty clause.
  OMPPriorityClause()
      : OMPClause(OMPC_priority, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return Priority number.
  Expr *getPriority() { return cast<Expr>(Priority); }

  /// Return Priority number.
  Expr *getPriority() const { return cast<Expr>(Priority); }

  child_range children() { return child_range(&Priority, &Priority + 1); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_priority;
  }
};

/// This represents 'grainsize' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop grainsize(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'grainsize'
/// with single expression '4'.
class OMPGrainsizeClause : public OMPClause {
  // The AST reader needs access to the private setter when deserializing.
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Grainsize expression of the clause.
  Stmt *Grainsize = nullptr;

  /// Set the grainsize expression.
  void setGrainsize(Expr *Size) { Grainsize = Size; }

public:
  /// Build 'grainsize' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
                     SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_grainsize, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Grainsize(Size) {}

  /// Build an empty clause.
  explicit OMPGrainsizeClause()
      : OMPClause(OMPC_grainsize, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the grainsize expression (may be null for an empty clause).
  Expr *getGrainsize() const { return cast_or_null<Expr>(Grainsize); }

  child_range children() { return child_range(&Grainsize, &Grainsize + 1); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_grainsize;
  }
};

/// This represents 'nogroup' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp taskloop nogroup
/// \endcode
/// In this example directive '#pragma omp taskloop' has 'nogroup' clause.
class OMPNogroupClause : public OMPClause {
public:
  /// Build 'nogroup' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_nogroup, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPNogroupClause()
      : OMPClause(OMPC_nogroup, SourceLocation(), SourceLocation()) {}

  // 'nogroup' carries no expressions, so there are no children to visit.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_nogroup;
  }
};

/// This represents 'num_tasks' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop num_tasks(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'num_tasks'
/// with single expression '4'.
class OMPNumTasksClause : public OMPClause {
  // The AST reader needs access to the private setter when deserializing.
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number-of-tasks expression of the clause.
  Stmt *NumTasks = nullptr;

  /// Set the num_tasks expression.
  void setNumTasks(Expr *Size) { NumTasks = Size; }

public:
  /// Build 'num_tasks' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumTasksClause(Expr *Size, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_num_tasks, StartLoc, EndLoc), LParenLoc(LParenLoc),
        NumTasks(Size) {}

  /// Build an empty clause.
  explicit OMPNumTasksClause()
      : OMPClause(OMPC_num_tasks, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the num_tasks expression (may be null for an empty clause).
  Expr *getNumTasks() const { return cast_or_null<Expr>(NumTasks); }

  child_range children() { return child_range(&NumTasks, &NumTasks + 1); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_num_tasks;
  }
};

/// This represents 'hint' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp critical (name) hint(6)
/// \endcode
/// In this example directive '#pragma omp critical' has name 'name' and clause
/// 'hint' with argument '6'.
class OMPHintClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Hint expression of the 'hint' clause.
  Stmt *Hint = nullptr;

  /// Set hint expression.
  void setHint(Expr *H) { Hint = H; }

public:
  /// Build 'hint' clause with expression \a Hint.
  ///
  /// \param Hint Hint expression.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc,
                SourceLocation EndLoc)
      : OMPClause(OMPC_hint, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Hint(Hint) {}

  /// Build an empty clause.
  OMPHintClause()
      : OMPClause(OMPC_hint, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns the hint expression (may be null for an empty clause).
  Expr *getHint() const { return cast_or_null<Expr>(Hint); }

  child_range children() { return child_range(&Hint, &Hint + 1); }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_hint;
  }
};

/// This represents 'dist_schedule' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp distribute dist_schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp distribute' has 'dist_schedule'
/// clause with arguments 'static' and '3'.
class OMPDistScheduleClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'schedule' clause.
  OpenMPDistScheduleClauseKind Kind = OMPC_DIST_SCHEDULE_unknown;

  /// Start location of the schedule kind in source code.
  SourceLocation KindLoc;

  /// Location of ',' (if any).
  SourceLocation CommaLoc;

  /// Chunk size.
  Expr *ChunkSize = nullptr;

  /// Set schedule kind.
  ///
  /// \param K Schedule kind.
  void setDistScheduleKind(OpenMPDistScheduleClauseKind K) { Kind = K; }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set schedule kind start location.
  ///
  /// \param KLoc Schedule kind location.
  void setDistScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

  /// Set location of ','.
  ///
  /// \param Loc Location of ','.
  void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }

  /// Set chunk size.
  ///
  /// \param E Chunk size.
  void setChunkSize(Expr *E) { ChunkSize = E; }

public:
  /// Build 'dist_schedule' clause with schedule kind \a Kind and chunk
  /// size expression \a ChunkSize.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param CommaLoc Location of ','.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind DistSchedule kind.
  /// \param ChunkSize Chunk size.
  /// \param HelperChunkSize Helper chunk size for combined directives.
  OMPDistScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation KLoc, SourceLocation CommaLoc,
                        SourceLocation EndLoc,
                        OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
                        Stmt *HelperChunkSize)
      : OMPClause(OMPC_dist_schedule, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind),
        KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) {
    setPreInitStmt(HelperChunkSize);
  }

  /// Build an empty clause.
  explicit OMPDistScheduleClause()
      : OMPClause(OMPC_dist_schedule, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Get kind of the clause.
  OpenMPDistScheduleClauseKind getDistScheduleKind() const { return Kind; }

  /// Get location of '('.
  SourceLocation getLParenLoc() { return LParenLoc; }

  /// Get kind location.
  SourceLocation getDistScheduleKindLoc() { return KindLoc; }

  /// Get location of ','.
  SourceLocation getCommaLoc() { return CommaLoc; }

  /// Get chunk size.
  Expr *getChunkSize() { return ChunkSize; }

  /// Get chunk size.
  const Expr *getChunkSize() const { return ChunkSize; }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
                       reinterpret_cast<Stmt **>(&ChunkSize) + 1);
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_dist_schedule;
  }
};

/// This represents 'defaultmap' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp target defaultmap(tofrom: scalar)
/// \endcode
/// In this example directive '#pragma omp target' has 'defaultmap' clause of
/// kind 'scalar' with modifier 'tofrom'.
class OMPDefaultmapClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Modifiers for 'defaultmap' clause.
  OpenMPDefaultmapClauseModifier Modifier = OMPC_DEFAULTMAP_MODIFIER_unknown;

  /// Locations of modifiers.
  SourceLocation ModifierLoc;

  /// A kind of the 'defaultmap' clause.
  OpenMPDefaultmapClauseKind Kind = OMPC_DEFAULTMAP_unknown;

  /// Start location of the defaultmap kind in source code.
  SourceLocation KindLoc;

  /// Set defaultmap kind.
  ///
  /// \param K Defaultmap kind.
  void setDefaultmapKind(OpenMPDefaultmapClauseKind K) { Kind = K; }

  /// Set the defaultmap modifier.
  ///
  /// \param M Defaultmap modifier.
  void setDefaultmapModifier(OpenMPDefaultmapClauseModifier M) {
    Modifier = M;
  }

  /// Set location of the defaultmap modifier.
  void setDefaultmapModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set defaultmap kind start location.
  ///
  /// \param KLoc Defaultmap kind location.
  void setDefaultmapKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

public:
  /// Build 'defaultmap' clause with defaultmap kind \a Kind
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind Defaultmap kind.
  /// \param M The modifier applied to 'defaultmap' clause.
  /// \param MLoc Location of the modifier.
  OMPDefaultmapClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                      SourceLocation MLoc, SourceLocation KLoc,
                      SourceLocation EndLoc, OpenMPDefaultmapClauseKind Kind,
                      OpenMPDefaultmapClauseModifier M)
      : OMPClause(OMPC_defaultmap, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Modifier(M), ModifierLoc(MLoc), Kind(Kind), KindLoc(KLoc) {}

  /// Build an empty clause.
  explicit OMPDefaultmapClause()
      : OMPClause(OMPC_defaultmap, SourceLocation(), SourceLocation()) {}

  /// Get kind of the clause.
  OpenMPDefaultmapClauseKind getDefaultmapKind() const { return Kind; }

  /// Get the modifier of the clause.
  OpenMPDefaultmapClauseModifier getDefaultmapModifier() const {
    return Modifier;
  }

  /// Get location of '('.
  SourceLocation getLParenLoc() { return LParenLoc; }

  /// Get kind location.
  SourceLocation getDefaultmapKindLoc() { return KindLoc; }

  /// Get the modifier location.
  SourceLocation getDefaultmapModifierLoc() const { return ModifierLoc; }

  // Flag-like clause: it carries no sub-expressions.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_defaultmap;
  }
};

/// This represents clause 'to' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update to(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to'
/// with the variables 'a' and 'b'.
class OMPToClause final : public OMPMappableExprListClause<OMPToClause>,
                          private llvm::TrailingObjects<
                              OMPToClause, Expr *, ValueDecl *, unsigned,
                              OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of expressions listed in this clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  explicit OMPToClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, unsigned NumVars,
                       unsigned NumUniqueDeclarations,
                       unsigned NumComponentLists, unsigned NumComponents)
      : OMPMappableExprListClause(OMPC_to, StartLoc, LParenLoc, EndLoc,
                                  NumVars, NumUniqueDeclarations,
                                  NumComponentLists, NumComponents) {}

  /// Build an empty clause.
  ///
  /// \param NumVars Number of expressions listed in this clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  explicit OMPToClause(unsigned NumVars, unsigned NumUniqueDeclarations,
                       unsigned NumComponentLists, unsigned NumComponents)
      : OMPMappableExprListClause(
            OMPC_to, SourceLocation(), SourceLocation(), SourceLocation(),
            NumVars, NumUniqueDeclarations, NumComponentLists,
            NumComponents) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    return varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  static OMPToClause *Create(const ASTContext &C, SourceLocation StartLoc,
                             SourceLocation LParenLoc, SourceLocation EndLoc,
                             ArrayRef<Expr *> Vars,
                             ArrayRef<ValueDecl *> Declarations,
                             MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of expressions listed in the clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this
  /// clause.
  /// \param NumComponents Total number of expression components in the clause.
  static OMPToClause *CreateEmpty(const ASTContext &C, unsigned NumVars,
                                  unsigned NumUniqueDeclarations,
                                  unsigned NumComponentLists,
                                  unsigned NumComponents);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_to;
  }
};

/// This represents clause 'from' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update from(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'from'
/// with the variables 'a' and 'b'.
class OMPFromClause final
    : public OMPMappableExprListClause<OMPFromClause>,
      private llvm::TrailingObjects<
          OMPFromClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of expressions listed in this clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  explicit OMPFromClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                         SourceLocation EndLoc, unsigned NumVars,
                         unsigned NumUniqueDeclarations,
                         unsigned NumComponentLists, unsigned NumComponents)
      : OMPMappableExprListClause(OMPC_from, StartLoc, LParenLoc, EndLoc,
                                  NumVars, NumUniqueDeclarations,
                                  NumComponentLists, NumComponents) {}

  /// Build an empty clause.
  ///
  /// \param NumVars Number of expressions listed in this clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  explicit OMPFromClause(unsigned NumVars, unsigned NumUniqueDeclarations,
                         unsigned NumComponentLists, unsigned NumComponents)
      : OMPMappableExprListClause(
            OMPC_from, SourceLocation(), SourceLocation(), SourceLocation(),
            NumVars, NumUniqueDeclarations, NumComponentLists,
            NumComponents) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    return varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  static OMPFromClause *Create(const ASTContext &C, SourceLocation StartLoc,
                               SourceLocation LParenLoc, SourceLocation EndLoc,
                               ArrayRef<Expr *> Vars,
                               ArrayRef<ValueDecl *> Declarations,
                               MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of expressions listed in the clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this
  /// clause.
  /// \param NumComponents Total number of expression components in the clause.
  static OMPFromClause *CreateEmpty(const ASTContext &C, unsigned NumVars,
                                    unsigned NumUniqueDeclarations,
                                    unsigned NumComponentLists,
                                    unsigned NumComponents);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_from;
  }
};

/// This represents clause 'use_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target data use_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target data' has clause
/// 'use_device_ptr' with the variables 'a' and 'b'.
class OMPUseDevicePtrClause final
    : public OMPMappableExprListClause<OMPUseDevicePtrClause>,
      private llvm::TrailingObjects<
          OMPUseDevicePtrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of expressions listed in this clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  explicit OMPUseDevicePtrClause(SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc, unsigned NumVars,
                                 unsigned NumUniqueDeclarations,
                                 unsigned NumComponentLists,
                                 unsigned NumComponents)
      : OMPMappableExprListClause(OMPC_use_device_ptr, StartLoc, LParenLoc,
                                  EndLoc, NumVars, NumUniqueDeclarations,
                                  NumComponentLists, NumComponents) {}

  /// Build an empty clause.
  ///
  /// \param NumVars Number of expressions listed in this clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  explicit OMPUseDevicePtrClause(unsigned NumVars,
                                 unsigned NumUniqueDeclarations,
                                 unsigned NumComponentLists,
                                 unsigned NumComponents)
      : OMPMappableExprListClause(OMPC_use_device_ptr, SourceLocation(),
                                  SourceLocation(), SourceLocation(), NumVars,
                                  NumUniqueDeclarations, NumComponentLists,
                                  NumComponents) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  /// Three Expr* arrays are stored back-to-back: the variables, their private
  /// copies, and the copy initializers -- hence the factor of 3.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    return 3 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

  /// Sets the list of references to private copies with initializers for new
  /// private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for new
  /// private variables.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Sets the list of references to initializer variables for new private
  /// variables.
  /// \param VL List of references.
  void setInits(ArrayRef<Expr *> VL);

  /// Gets the list of references to initializer variables for new private
  /// variables.
  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param PrivateVars Expressions referring to private copies.
  /// \param Inits Expressions referring to private copy initializers.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  static OMPUseDevicePtrClause *
  Create(const ASTContext &C, SourceLocation StartLoc,
         SourceLocation LParenLoc, SourceLocation EndLoc,
         ArrayRef<Expr *> Vars, ArrayRef<Expr *> PrivateVars,
         ArrayRef<Expr *> Inits, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of expressions listed in the clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this
  /// clause.
  /// \param NumComponents Total number of expression components in the clause.
  static OMPUseDevicePtrClause *CreateEmpty(const ASTContext &C,
                                            unsigned NumVars,
                                            unsigned NumUniqueDeclarations,
                                            unsigned NumComponentLists,
                                            unsigned NumComponents);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }

  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  using inits_iterator = MutableArrayRef<Expr *>::iterator;
  using inits_const_iterator = ArrayRef<const Expr *>::iterator;
  using inits_range = llvm::iterator_range<inits_iterator>;
  using inits_const_range = llvm::iterator_range<inits_const_iterator>;

  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }

  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_use_device_ptr;
  }
};

/// This represents clause 'is_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target is_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause
/// 'is_device_ptr' with the variables 'a' and 'b'.
class OMPIsDevicePtrClause final
    : public OMPMappableExprListClause<OMPIsDevicePtrClause>,
      private llvm::TrailingObjects<
          OMPIsDevicePtrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param NumVars Number of expressions listed in this clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  explicit OMPIsDevicePtrClause(SourceLocation StartLoc,
                                SourceLocation LParenLoc,
                                SourceLocation EndLoc, unsigned NumVars,
                                unsigned NumUniqueDeclarations,
                                unsigned NumComponentLists,
                                unsigned NumComponents)
      : OMPMappableExprListClause(OMPC_is_device_ptr, StartLoc, LParenLoc,
                                  EndLoc, NumVars, NumUniqueDeclarations,
                                  NumComponentLists, NumComponents) {}

  /// Build an empty clause.
  ///
  /// \param NumVars Number of expressions listed in this clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this clause.
  /// \param NumComponents Total number of expression components in the clause.
  explicit OMPIsDevicePtrClause(unsigned NumVars,
                                unsigned NumUniqueDeclarations,
                                unsigned NumComponentLists,
                                unsigned NumComponents)
      : OMPMappableExprListClause(OMPC_is_device_ptr, SourceLocation(),
                                  SourceLocation(), SourceLocation(), NumVars,
                                  NumUniqueDeclarations, NumComponentLists,
                                  NumComponents) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    return varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  static OMPIsDevicePtrClause *
  Create(const ASTContext &C, SourceLocation StartLoc,
         SourceLocation LParenLoc, SourceLocation EndLoc,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param NumVars Number of expressions listed in the clause.
  /// \param NumUniqueDeclarations Number of unique base declarations in this
  /// clause.
  /// \param NumComponentLists Number of component lists in this
  /// clause.
  /// \param NumComponents Total number of expression components in the clause.
  static OMPIsDevicePtrClause *CreateEmpty(const ASTContext &C,
                                           unsigned NumVars,
                                           unsigned NumUniqueDeclarations,
                                           unsigned NumComponentLists,
                                           unsigned NumComponents);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_is_device_ptr;
  }
};

/// This class implements a simple visitor for OMPClause
/// subclasses.
template<class ImplClass, template <typename> class Ptr, typename RetTy> class OMPClauseVisitorBase { public: #define PTR(CLASS) typename Ptr<CLASS>::type #define DISPATCH(CLASS) \ return static_cast<ImplClass*>(this)->Visit##CLASS(static_cast<PTR(CLASS)>(S)) #define OPENMP_CLAUSE(Name, Class) \ RetTy Visit ## Class (PTR(Class) S) { DISPATCH(Class); } #include "clang/Basic/OpenMPKinds.def" RetTy Visit(PTR(OMPClause) S) { // Top switch clause: visit each OMPClause. switch (S->getClauseKind()) { default: llvm_unreachable("Unknown clause kind!"); #define OPENMP_CLAUSE(Name, Class) \ case OMPC_ ## Name : return Visit ## Class(static_cast<PTR(Class)>(S)); #include "clang/Basic/OpenMPKinds.def" } } // Base case, ignore it. :) RetTy VisitOMPClause(PTR(OMPClause) Node) { return RetTy(); } #undef PTR #undef DISPATCH }; template <typename T> using const_ptr = typename std::add_pointer<typename std::add_const<T>::type>; template<class ImplClass, typename RetTy = void> class OMPClauseVisitor : public OMPClauseVisitorBase <ImplClass, std::add_pointer, RetTy> {}; template<class ImplClass, typename RetTy = void> class ConstOMPClauseVisitor : public OMPClauseVisitorBase <ImplClass, const_ptr, RetTy> {}; class OMPClausePrinter final : public OMPClauseVisitor<OMPClausePrinter> { raw_ostream &OS; const PrintingPolicy &Policy; /// Process clauses with list of variables. template <typename T> void VisitOMPClauseList(T *Node, char StartSym); public: OMPClausePrinter(raw_ostream &OS, const PrintingPolicy &Policy) : OS(OS), Policy(Policy) {} #define OPENMP_CLAUSE(Name, Class) void Visit##Class(Class *S); #include "clang/Basic/OpenMPKinds.def" }; } // namespace clang #endif // LLVM_CLANG_AST_OPENMPCLAUSE_H
ps.c
/*** Some usefull math macros ***/ #define SIGN(a,b) ((b) >= 0.0 ? fabs(a) : -fabs(a)) static double mnarg1,mnarg2; #define FMAX(a,b) (mnarg1=(a),mnarg2=(b),(mnarg1) > (mnarg2) ?\ (mnarg1) : (mnarg2)) static double mnarg1,mnarg2; #define FMIN(a,b) (mnarg1=(a),mnarg2=(b),(mnarg1) < (mnarg2) ?\ (mnarg1) : (mnarg2)) #define ERFC_NPTS (int) 75 #define ERFC_PARAM_DELTA (float) 0.1 static double log_erfc_table[ERFC_NPTS], erfc_params[ERFC_NPTS]; static gsl_interp_accel *erfc_acc; static gsl_spline *erfc_spline; #define NGaussLegendre 40 //defines the number of points in the Gauss-Legendre quadrature integration #define NMass 300 #define NSFR_high 200 #define NSFR_low 250 #define NGL_SFR 100 // 100 #define NMTURN 50//100 #define LOG10_MTURN_MAX ((double)(10)) #define LOG10_MTURN_MIN ((double)(5.-9e-8)) #define NR_END 1 #define FREE_ARG char* #define MM 7 #define NSTACK 50 #define EPS2 3.0e-11 #define Luv_over_SFR (double)(1./1.15/1e-28) // Luv/SFR = 1 / 1.15 x 10^-28 [M_solar yr^-1/erg s^-1 Hz^-1] // G. Sun and S. R. 
Furlanetto (2016) MNRAS, 417, 33 #define delta_lnMhalo (double)(5e-6) #define Mhalo_min (double)(1e6) #define Mhalo_max (double)(1e16) float calibrated_NF_min; double *deltaz, *deltaz_smoothed, *NeutralFractions, *z_Q, *Q_value, *nf_vals, *z_vals; int N_NFsamples,N_extrapolated, N_analytic, N_calibrated, N_deltaz; bool initialised_ComputeLF = false; gsl_interp_accel *LF_spline_acc; gsl_spline *LF_spline; gsl_interp_accel *deriv_spline_acc; gsl_spline *deriv_spline; struct CosmoParams *cosmo_params_ps; struct UserParams *user_params_ps; struct FlagOptions *flag_options_ps; //double sigma_norm, R, theta_cmb, omhh, z_equality, y_d, sound_horizon, alpha_nu, f_nu, f_baryon, beta_c, d2fact, R_CUTOFF, DEL_CURR, SIG_CURR; double sigma_norm, theta_cmb, omhh, z_equality, y_d, sound_horizon, alpha_nu, f_nu, f_baryon, beta_c, d2fact, R_CUTOFF, DEL_CURR, SIG_CURR; float MinMass, mass_bin_width, inv_mass_bin_width; double sigmaparam_FgtrM_bias(float z, float sigsmallR, float del_bias, float sig_bias); float *Mass_InterpTable, *Sigma_InterpTable, *dSigmadm_InterpTable; float *log10_overdense_spline_SFR, *log10_Nion_spline, *Overdense_spline_SFR, *Nion_spline; float *prev_log10_overdense_spline_SFR, *prev_log10_Nion_spline, *prev_Overdense_spline_SFR, *prev_Nion_spline; float *Mturns, *Mturns_MINI; float *log10_Nion_spline_MINI, *Nion_spline_MINI; float *prev_log10_Nion_spline_MINI, *prev_Nion_spline_MINI; float *xi_SFR,*wi_SFR, *xi_SFR_Xray, *wi_SFR_Xray; float *overdense_high_table, *overdense_low_table, *log10_overdense_low_table; float **log10_SFRD_z_low_table, **SFRD_z_high_table; float **log10_SFRD_z_low_table_MINI, **SFRD_z_high_table_MINI; double *lnMhalo_param, *Muv_param, *Mhalo_param; double *log10phi, *M_uv_z, *M_h_z; double *lnMhalo_param_MINI, *Muv_param_MINI, *Mhalo_param_MINI; double *log10phi_MINI; *M_uv_z_MINI, *M_h_z_MINI; double *deriv, *lnM_temp, *deriv_temp; double *z_val, *z_X_val, *Nion_z_val, *SFRD_val; double *Nion_z_val_MINI, *SFRD_val_MINI; void 
initialiseSigmaMInterpTable(float M_Min, float M_Max); void freeSigmaMInterpTable(); void initialiseGL_Nion(int n, float M_Min, float M_Max); void initialiseGL_Nion_Xray(int n, float M_Min, float M_Max); float Mass_limit (float logM, float PL, float FRAC); void bisection(float *x, float xlow, float xup, int *iter); float Mass_limit_bisection(float Mmin, float Mmax, float PL, float FRAC); double sheth_delc(double del, double sig); float dNdM_conditional(float growthf, float M1, float M2, float delta1, float delta2, float sigma2); double dNion_ConditionallnM(double lnM, void *params); double Nion_ConditionalM(double growthf, double M1, double M2, double sigma2, double delta1, double delta2, double MassTurnover, double Alpha_star, double Alpha_esc, double Fstar10, double Fesc10, double Mlim_Fstar, double Mlim_Fesc, bool FAST_FCOLL_TABLES); double dNion_ConditionallnM_MINI(double lnM, void *params); double Nion_ConditionalM_MINI(double growthf, double M1, double M2, double sigma2, double delta1, double delta2, double MassTurnover, double MassTurnover_upper, double Alpha_star, double Alpha_esc, double Fstar10, double Fesc10, double Mlim_Fstar, double Mlim_Fesc, bool FAST_FCOLL_TABLES); float GaussLegendreQuad_Nion(int Type, int n, float growthf, float M2, float sigma2, float delta1, float delta2, float MassTurnover, float Alpha_star, float Alpha_esc, float Fstar10, float Fesc10, float Mlim_Fstar, float Mlim_Fesc, bool FAST_FCOLL_TABLES); float GaussLegendreQuad_Nion_MINI(int Type, int n, float growthf, float M2, float sigma2, float delta1, float delta2, float MassTurnover, float MassTurnover_upper, float Alpha_star, float Alpha_esc, float Fstar7_MINI, float Fesc7_MINI, float Mlim_Fstar_MINI, float Mlim_Fesc_MINI, bool FAST_FCOLL_TABLES); //JBM: Exact integral for power-law indices non zero (for zero it's erfc) double Fcollapprox (double numin, double beta); int n_redshifts_1DTable; double zmin_1DTable, zmax_1DTable, zbin_width_1DTable; double *FgtrM_1DTable_linear; 
// GSL interpolators for the ionized fraction Q(z) and its inverse z(Q),
// used by the photon-conservation correction machinery.
static gsl_interp_accel *Q_at_z_spline_acc;
static gsl_spline *Q_at_z_spline;
static gsl_interp_accel *z_at_Q_spline_acc;
static gsl_spline *z_at_Q_spline;
static double Zmin, Zmax, Qmin, Qmax;
void Q_at_z(double z, double *splined_value);
void z_at_Q(double Q, double *splined_value);

static gsl_interp_accel *deltaz_spline_for_photoncons_acc;
static gsl_spline *deltaz_spline_for_photoncons;

// Neutral-fraction history xHI(z) spline and its inverse z(xHI).
static gsl_interp_accel *NFHistory_spline_acc;
static gsl_spline *NFHistory_spline;
static gsl_interp_accel *z_NFHistory_spline_acc;
static gsl_spline *z_NFHistory_spline;
void initialise_NFHistory_spline(double *redshifts, double *NF_estimate, int NSpline);
void z_at_NFHist(double xHI_Hist, double *splined_value);
void NFHist_at_z(double z, double *splined_value);

//int nbin;
//double *z_Q, *Q_value, *Q_z, *z_value;

double FinalNF_Estimate, FirstNF_Estimate;

// Parameter bundles passed (via void*) to the GSL integrands defined below.
struct parameters_gsl_FgtrM_int_{
    double z_obs;
    double gf_obs;
};

struct parameters_gsl_SFR_General_int_{
    double z_obs;
    double gf_obs;
    double Mdrop;
    double Mdrop_upper;
    double pl_star;
    double pl_esc;
    double frac_star;
    double frac_esc;
    double LimitMass_Fstar;
    double LimitMass_Fesc;
};

struct parameters_gsl_SFR_con_int_{
    double gf_obs;
    double Mval;
    double sigma2;
    double delta1;
    double delta2;
    double Mdrop;
    double Mdrop_upper;
    double pl_star;
    double pl_esc;
    double frac_star;
    double frac_esc;
    double LimitMass_Fstar;
    double LimitMass_Fesc;
};

// Numerical-Recipes-style vector/spline helper prototypes.
unsigned long *lvector(long nl, long nh);
void free_lvector(unsigned long *v, long nl, long nh);

float *vector(long nl, long nh);
void free_vector(float *v, long nl, long nh);

void spline(float x[], float y[], int n, float yp1, float ypn, float y2[]);
void splint(float xa[], float ya[], float y2a[], int n, float x, float *y);

void gauleg(float x1, float x2, float x[], float w[], int n);

/***** FUNCTION PROTOTYPES *****/
double init_ps(); /* initialize global variables, MUST CALL THIS FIRST!!! returns R_CUTOFF */
void free_ps(); /* deallocates the gsl structures from init_ps */
double sigma_z0(double M); //calculates sigma at z=0 (no dicke)
double power_in_k(double k); /* Returns the value of the linear power spectrum density (i.e. <|delta_k|^2>/V) at a given k mode at z=0 */
double TFmdm(double k); //Eisenstein & Hu power spectrum transfer function
void TFset_parameters();

double TF_CLASS(double k, int flag_int, int flag_dv); //transfer function of matter (flag_dv=0) and relative velocities (flag_dv=1) fluctuations from CLASS
double power_in_vcb(double k); /* Returns the value of the DM-b relative velocity power spectrum density (i.e. <|delta_k|^2>/V) at a given k mode at z=0 */

double FgtrM(double z, double M);
double FgtrM_wsigma(double z, double sig);
double FgtrM_st(double z, double M);
double FgtrM_Watson(double growthf, double M);
double FgtrM_Watson_z(double z, double growthf, double M);
double FgtrM_General(double z, double M);

float erfcc(float x);
double splined_erfc(double x);

double M_J_WDM();

// Stores the caller's parameter-struct pointers in this translation unit's
// globals; the routines in this file read cosmo_params_ps / user_params_ps.
void Broadcast_struct_global_PS(struct UserParams *user_params, struct CosmoParams *cosmo_params){
    cosmo_params_ps = cosmo_params;
    user_params_ps = user_params;
}

/*
  this function reads the z=0 matter (CDM+baryons)  and relative velocity transfer functions from CLASS (from a file)
  flag_int = 0 to initialize interpolator, flag_int = -1 to free memory, flag_int = else to interpolate.
  flag_dv = 0 to output density, flag_dv = 1 to output velocity.
similar to built-in function "double T_RECFAST(float z, int flag)" */ double TF_CLASS(double k, int flag_int, int flag_dv) { static double kclass[CLASS_LENGTH], Tmclass[CLASS_LENGTH], Tvclass_vcb[CLASS_LENGTH]; static gsl_interp_accel *acc_density, *acc_vcb; static gsl_spline *spline_density, *spline_vcb; float trash, currk, currTm, currTv; double ans; int i; int gsl_status; FILE *F; char filename[500]; sprintf(filename,"%s/%s",global_params.external_table_path,CLASS_FILENAME); if (flag_int == 0) { // Initialize vectors and read file if (!(F = fopen(filename, "r"))) { LOG_ERROR("Unable to open file: %s for reading.", filename); Throw(IOError); } int nscans; for (i = 0; i < CLASS_LENGTH; i++) { nscans = fscanf(F, "%e %e %e ", &currk, &currTm, &currTv); if (nscans != 3) { LOG_ERROR("Reading CLASS Transfer Function failed."); Throw(IOError); } kclass[i] = currk; Tmclass[i] = currTm; Tvclass_vcb[i] = currTv; if (i > 0 && kclass[i] <= kclass[i - 1]) { LOG_WARNING("Tk table not ordered"); LOG_WARNING("k=%.1le kprev=%.1le", kclass[i], kclass[i - 1]); } } fclose(F); LOG_SUPER_DEBUG("Read CLASS Transfer file"); gsl_set_error_handler_off(); // Set up spline table for densities acc_density = gsl_interp_accel_alloc (); spline_density = gsl_spline_alloc (gsl_interp_cspline, CLASS_LENGTH); gsl_status = gsl_spline_init(spline_density, kclass, Tmclass, CLASS_LENGTH); GSL_ERROR(gsl_status); LOG_SUPER_DEBUG("Generated CLASS Density Spline."); //Set up spline table for velocities acc_vcb = gsl_interp_accel_alloc (); spline_vcb = gsl_spline_alloc (gsl_interp_cspline, CLASS_LENGTH); gsl_status = gsl_spline_init(spline_vcb, kclass, Tvclass_vcb, CLASS_LENGTH); GSL_ERROR(gsl_status); LOG_SUPER_DEBUG("Generated CLASS velocity Spline."); return 0; } else if (flag_int == -1) { gsl_spline_free (spline_density); gsl_interp_accel_free(acc_density); gsl_spline_free (spline_vcb); gsl_interp_accel_free(acc_vcb); return 0; } if (k > kclass[CLASS_LENGTH-1]) { // k>kmax LOG_WARNING("Called TF_CLASS 
with k=%f, larger than kmax! Returning value at kmax.", k); if(flag_dv == 0){ // output is density return (Tmclass[CLASS_LENGTH]/kclass[CLASS_LENGTH-1]/kclass[CLASS_LENGTH-1]); } else if(flag_dv == 1){ // output is rel velocity return (Tvclass_vcb[CLASS_LENGTH]/kclass[CLASS_LENGTH-1]/kclass[CLASS_LENGTH-1]); } //we just set it to the last value, since sometimes it wants large k for R<<cell_size, which does not matter much. } else { // Do spline if(flag_dv == 0){ // output is density ans = gsl_spline_eval (spline_density, k, acc_density); } else if(flag_dv == 1){ // output is relative velocity ans = gsl_spline_eval (spline_vcb, k, acc_vcb); } else{ ans=0.0; //neither densities not velocities? } } return ans/k/k; //we have to divide by k^2 to agree with the old-fashioned convention. } // FUNCTION sigma_z0(M) // Returns the standard deviation of the normalized, density excess (delta(x)) field, // smoothed on the comoving scale of M (see filter definitions for M<->R conversion). // The sigma is evaluated at z=0, with the time evolution contained in the dicke(z) factor, // i.e. sigma(M,z) = sigma_z0(m) * dicke(z) // normalized so that sigma_z0(M->8/h Mpc) = SIGMA8 in ../Parameter_files/COSMOLOGY.H // NOTE: volume is normalized to = 1, so this is equvalent to the mass standard deviation // M is in solar masses // References: Padmanabhan, pg. 210, eq. 5.107 double dsigma_dk(double k, void *params){ double p, w, T, gamma, q, aa, bb, cc, kR; // get the power spectrum.. choice of 5: if (user_params_ps->POWER_SPECTRUM == 0){ // Eisenstein & Hu T = TFmdm(k); // check if we should cuttoff power spectrum according to Bode et al. 
2000 transfer function if (global_params.P_CUTOFF) T *= pow(1 + pow(BODE_e*k*R_CUTOFF, 2*BODE_v), -BODE_n/BODE_v); p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T; } else if (user_params_ps->POWER_SPECTRUM == 1){ // BBKS gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb/cosmo_params_ps->OMm)); q = k / (cosmo_params_ps->hlittle*gamma); T = (log(1.0+2.34*q)/(2.34*q)) * pow( 1.0+3.89*q + pow(16.1*q, 2) + pow( 5.46*q, 3) + pow(6.71*q, 4), -0.25); p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T; } else if (user_params_ps->POWER_SPECTRUM == 2){ // Efstathiou,G., Bond,J.R., and White,S.D.M., MNRAS,258,1P (1992) gamma = 0.25; aa = 6.4/(cosmo_params_ps->hlittle*gamma); bb = 3.0/(cosmo_params_ps->hlittle*gamma); cc = 1.7/(cosmo_params_ps->hlittle*gamma); p = pow(k, cosmo_params_ps->POWER_INDEX) / pow( 1+pow( aa*k + pow(bb*k, 1.5) + pow(cc*k,2), 1.13), 2.0/1.13 ); } else if (user_params_ps->POWER_SPECTRUM == 3){ // Peebles, pg. 626 gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb/cosmo_params_ps->OMm)); aa = 8.0 / (cosmo_params_ps->hlittle*gamma); bb = 4.7 / pow(cosmo_params_ps->hlittle*gamma, 2); p = pow(k, cosmo_params_ps->POWER_INDEX) / pow(1 + aa*k + bb*k*k, 2); } else if (user_params_ps->POWER_SPECTRUM == 4){ // White, SDM and Frenk, CS, 1991, 379, 52 gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb/cosmo_params_ps->OMm)); aa = 1.7/(cosmo_params_ps->hlittle*gamma); bb = 9.0/pow(cosmo_params_ps->hlittle*gamma, 1.5); cc = 1.0/pow(cosmo_params_ps->hlittle*gamma, 2); p = pow(k, cosmo_params_ps->POWER_INDEX) * 19400.0 / pow(1 + aa*k + bb*pow(k, 1.5) + cc*k*k, 2); } else if (user_params_ps->POWER_SPECTRUM == 5){ // output of CLASS T = TF_CLASS(k, 1, 0); //read from z=0 output of CLASS. 
Note, flag_int = 1 here always, since now we have to have initialized the interpolator for CLASS p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T; if(user_params_ps->USE_RELATIVE_VELOCITIES) { //jbm:Add average relvel suppression p *= 1.0 - A_VCB_PM*exp( -pow(log(k/KP_VCB_PM),2.0)/(2.0*SIGMAK_VCB_PM*SIGMAK_VCB_PM)); //for v=vrms } } else{ LOG_ERROR("No such power spectrum defined: %i. Output is bogus.", user_params_ps->POWER_SPECTRUM); Throw(ValueError); } double Radius; Radius = *(double *)params; kR = k*Radius; if ( (global_params.FILTER == 0) || (sigma_norm < 0) ){ // top hat if ( (kR) < 1.0e-4 ){ w = 1.0;} // w converges to 1 as (kR) -> 0 else { w = 3.0 * (sin(kR)/pow(kR, 3) - cos(kR)/pow(kR, 2));} } else if (global_params.FILTER == 1){ // gaussian of width 1/R w = pow(E, -kR*kR/2.0); } else { LOG_ERROR("No such filter: %i. Output is bogus.", global_params.FILTER); Throw(ValueError); } return k*k*p*w*w; } double sigma_z0(double M){ double result, error, lower_limit, upper_limit; gsl_function F; double rel_tol = FRACT_FLOAT_ERR*10; //<- relative tolerance gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000); double kstart, kend; double Radius; // R = MtoR(M); Radius = MtoR(M); // now lets do the integral for sigma and scale it with sigma_norm if(user_params_ps->POWER_SPECTRUM == 5){ kstart = fmax(1.0e-99/Radius, KBOT_CLASS); kend = fmin(350.0/Radius, KTOP_CLASS); }//we establish a maximum k of KTOP_CLASS~1e3 Mpc-1 and a minimum at KBOT_CLASS,~1e-5 Mpc-1 since the CLASS transfer function has a max! 
else{ kstart = 1.0e-99/Radius; kend = 350.0/Radius; } lower_limit = kstart;//log(kstart); upper_limit = kend;//log(kend); F.function = &dsigma_dk; F.params = &Radius; int status; gsl_set_error_handler_off(); status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,1000, GSL_INTEG_GAUSS61, w, &result, &error); if(status!=0) { LOG_ERROR("gsl integration error occured!"); LOG_ERROR("(function argument): lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error); LOG_ERROR("data: M=%e",M); GSL_ERROR(status); } gsl_integration_workspace_free (w); return sigma_norm * sqrt(result); } // FUNCTION TFmdm is the power spectrum transfer function from Eisenstein & Hu ApJ, 1999, 511, 5 double TFmdm(double k){ double q, gamma_eff, q_eff, TF_m, q_nu; q = k*pow(theta_cmb,2)/omhh; gamma_eff=sqrt(alpha_nu) + (1.0-sqrt(alpha_nu))/(1.0+pow(0.43*k*sound_horizon, 4)); q_eff = q/gamma_eff; TF_m= log(E+1.84*beta_c*sqrt(alpha_nu)*q_eff); TF_m /= TF_m + pow(q_eff,2) * (14.4 + 325.0/(1.0+60.5*pow(q_eff,1.11))); q_nu = 3.92*q/sqrt(f_nu/N_nu); TF_m *= 1.0 + (1.2*pow(f_nu,0.64)*pow(N_nu,0.3+0.6*f_nu)) / (pow(q_nu,-1.6)+pow(q_nu,0.8)); return TF_m; } void TFset_parameters(){ double z_drag, R_drag, R_equality, p_c, p_cb, f_c, f_cb, f_nub, k_equality; LOG_DEBUG("Setting Transfer Function parameters."); z_equality = 25000*omhh*pow(theta_cmb, -4) - 1.0; k_equality = 0.0746*omhh/(theta_cmb*theta_cmb); z_drag = 0.313*pow(omhh,-0.419) * (1 + 0.607*pow(omhh, 0.674)); z_drag = 1 + z_drag*pow(cosmo_params_ps->OMb*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle, 0.238*pow(omhh, 0.223)); z_drag *= 1291 * pow(omhh, 0.251) / (1 + 0.659*pow(omhh, 0.828)); y_d = (1 + z_equality) / (1.0 + z_drag); R_drag = 31.5 * cosmo_params_ps->OMb*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle * pow(theta_cmb, -4) * 1000 / (1.0 + z_drag); R_equality = 31.5 * cosmo_params_ps->OMb*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle * pow(theta_cmb, -4) * 1000 / 
(1.0 + z_equality); sound_horizon = 2.0/3.0/k_equality * sqrt(6.0/R_equality) * log( (sqrt(1+R_drag) + sqrt(R_drag+R_equality)) / (1.0 + sqrt(R_equality)) ); p_c = -(5 - sqrt(1 + 24*(1 - f_nu-f_baryon)))/4.0; p_cb = -(5 - sqrt(1 + 24*(1 - f_nu)))/4.0; f_c = 1 - f_nu - f_baryon; f_cb = 1 - f_nu; f_nub = f_nu+f_baryon; alpha_nu = (f_c/f_cb) * (2*(p_c+p_cb)+5)/(4*p_cb+5.0); alpha_nu *= 1 - 0.553*f_nub+0.126*pow(f_nub,3); alpha_nu /= 1-0.193*sqrt(f_nu)+0.169*f_nu; alpha_nu *= pow(1+y_d, p_c-p_cb); alpha_nu *= 1+ (p_cb-p_c)/2.0 * (1.0+1.0/(4.0*p_c+3.0)/(4.0*p_cb+7.0))/(1.0+y_d); beta_c = 1.0/(1.0-0.949*f_nub); } // Returns the value of the linear power spectrum DENSITY (i.e. <|delta_k|^2>/V) // at a given k mode linearly extrapolated to z=0 double power_in_k(double k){ double p, T, gamma, q, aa, bb, cc; // get the power spectrum.. choice of 5: if (user_params_ps->POWER_SPECTRUM == 0){ // Eisenstein & Hu T = TFmdm(k); // check if we should cuttoff power spectrum according to Bode et al. 2000 transfer function if (global_params.P_CUTOFF) T *= pow(1 + pow(BODE_e*k*R_CUTOFF, 2*BODE_v), -BODE_n/BODE_v); p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T; //p = pow(k, POWER_INDEX - 0.05*log(k/0.05)) * T * T; //running, alpha=0.05 } else if (user_params_ps->POWER_SPECTRUM == 1){ // BBKS gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb/cosmo_params_ps->OMm)); q = k / (cosmo_params_ps->hlittle*gamma); T = (log(1.0+2.34*q)/(2.34*q)) * pow( 1.0+3.89*q + pow(16.1*q, 2) + pow( 5.46*q, 3) + pow(6.71*q, 4), -0.25); p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T; } else if (user_params_ps->POWER_SPECTRUM == 2){ // Efstathiou,G., Bond,J.R., and White,S.D.M., MNRAS,258,1P (1992) gamma = 0.25; aa = 6.4/(cosmo_params_ps->hlittle*gamma); bb = 3.0/(cosmo_params_ps->hlittle*gamma); cc = 1.7/(cosmo_params_ps->hlittle*gamma); p = pow(k, cosmo_params_ps->POWER_INDEX) / pow( 1+pow( aa*k + pow(bb*k, 1.5) + pow(cc*k,2), 1.13), 2.0/1.13 
); } else if (user_params_ps->POWER_SPECTRUM == 3){ // Peebles, pg. 626 gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb)/(cosmo_params_ps->OMm)); aa = 8.0 / (cosmo_params_ps->hlittle*gamma); bb = 4.7 / pow(cosmo_params_ps->hlittle*gamma, 2); p = pow(k, cosmo_params_ps->POWER_INDEX) / pow(1 + aa*k + bb*k*k, 2); } else if (user_params_ps->POWER_SPECTRUM == 4){ // White, SDM and Frenk, CS, 1991, 379, 52 gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb/cosmo_params_ps->OMm)); aa = 1.7/(cosmo_params_ps->hlittle*gamma); bb = 9.0/pow(cosmo_params_ps->hlittle*gamma, 1.5); cc = 1.0/pow(cosmo_params_ps->hlittle*gamma, 2); p = pow(k, cosmo_params_ps->POWER_INDEX) * 19400.0 / pow(1 + aa*k + bb*pow(k, 1.5) + cc*k*k, 2); } else if (user_params_ps->POWER_SPECTRUM == 5){ // output of CLASS T = TF_CLASS(k, 1, 0); //read from z=0 output of CLASS. Note, flag_int = 1 here always, since now we have to have initialized the interpolator for CLASS p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T; if(user_params_ps->USE_RELATIVE_VELOCITIES) { //jbm:Add average relvel suppression p *= 1.0 - A_VCB_PM*exp( -pow(log(k/KP_VCB_PM),2.0)/(2.0*SIGMAK_VCB_PM*SIGMAK_VCB_PM)); //for v=vrms } } else{ LOG_ERROR("No such power spectrum defined: %i. Output is bogus.", user_params_ps->POWER_SPECTRUM); Throw(ValueError); } return p*TWOPI*PI*sigma_norm*sigma_norm; } /* Returns the value of the linear power spectrum of the DM-b relative velocity at kinematic decoupling (which we set at zkin=1010) */ double power_in_vcb(double k){ double p, T, gamma, q, aa, bb, cc; //only works if using CLASS if (user_params_ps->POWER_SPECTRUM == 5){ // CLASS T = TF_CLASS(k, 1, 1); //read from CLASS file. 
flag_int=1 since we have initialized before, flag_vcb=1 for velocity p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T; } else{ LOG_ERROR("Cannot get P_cb unless using CLASS: %i\n Set USE_RELATIVE_VELOCITIES 0 or use CLASS.\n", user_params_ps->POWER_SPECTRUM); Throw(ValueError); } return p*TWOPI*PI*sigma_norm*sigma_norm; } double init_ps(){ double result, error, lower_limit, upper_limit; gsl_function F; double rel_tol = FRACT_FLOAT_ERR*10; //<- relative tolerance gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000); double kstart, kend; //we start the interpolator if using CLASS: if (user_params_ps->POWER_SPECTRUM == 5){ LOG_DEBUG("Setting CLASS Transfer Function inits."); TF_CLASS(1.0, 0, 0); } // Set cuttoff scale for WDM (eq. 4 in Barkana et al. 2001) in comoving Mpc R_CUTOFF = 0.201*pow((cosmo_params_ps->OMm-cosmo_params_ps->OMb)*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle/0.15, 0.15)*pow(global_params.g_x/1.5, -0.29)*pow(global_params.M_WDM, -1.15); omhh = cosmo_params_ps->OMm*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle; theta_cmb = T_cmb / 2.7; // Translate Parameters into forms GLOBALVARIABLES form f_nu = global_params.OMn/cosmo_params_ps->OMm; f_baryon = cosmo_params_ps->OMb/cosmo_params_ps->OMm; if (f_nu < TINY) f_nu = 1e-10; if (f_baryon < TINY) f_baryon = 1e-10; TFset_parameters(); sigma_norm = -1; double Radius_8; Radius_8 = 8.0/cosmo_params_ps->hlittle; if(user_params_ps->POWER_SPECTRUM == 5){ kstart = fmax(1.0e-99/Radius_8, KBOT_CLASS); kend = fmin(350.0/Radius_8, KTOP_CLASS); }//we establish a maximum k of KTOP_CLASS~1e3 Mpc-1 and a minimum at KBOT_CLASS,~1e-5 Mpc-1 since the CLASS transfer function has a max! 
else{ kstart = 1.0e-99/Radius_8; kend = 350.0/Radius_8; } lower_limit = kstart; upper_limit = kend; LOG_DEBUG("Initializing Power Spectrum with lower_limit=%e, upper_limit=%e, rel_tol=%e, radius_8=%g", lower_limit,upper_limit, rel_tol, Radius_8); F.function = &dsigma_dk; F.params = &Radius_8; int status; gsl_set_error_handler_off(); status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol, 1000, GSL_INTEG_GAUSS61, w, &result, &error); if(status!=0) { LOG_ERROR("gsl integration error occured!"); LOG_ERROR("(function argument): lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error); GSL_ERROR(status); } gsl_integration_workspace_free (w); LOG_DEBUG("Initialized Power Spectrum."); sigma_norm = cosmo_params_ps->SIGMA_8/sqrt(result); //takes care of volume factor return R_CUTOFF; } //function to free arrays related to the power spectrum void free_ps(){ //we free the PS interpolator if using CLASS: if (user_params_ps->POWER_SPECTRUM == 5){ TF_CLASS(1.0, -1, 0); } return; } /* FUNCTION dsigmasqdm_z0(M) returns d/dm (sigma^2) (see function sigma), in units of Msun^-1 */ double dsigmasq_dm(double k, void *params){ double p, w, T, gamma, q, aa, bb, cc, dwdr, drdm, kR; // get the power spectrum.. choice of 5: if (user_params_ps->POWER_SPECTRUM == 0){ // Eisenstein & Hu ApJ, 1999, 511, 5 T = TFmdm(k); // check if we should cuttoff power spectrum according to Bode et al. 
2000 transfer function if (global_params.P_CUTOFF) T *= pow(1 + pow(BODE_e*k*R_CUTOFF, 2*BODE_v), -BODE_n/BODE_v); p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T; //p = pow(k, POWER_INDEX - 0.05*log(k/0.05)) * T * T; //running, alpha=0.05 } else if (user_params_ps->POWER_SPECTRUM == 1){ // BBKS gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb)/(cosmo_params_ps->OMm)); q = k / (cosmo_params_ps->hlittle*gamma); T = (log(1.0+2.34*q)/(2.34*q)) * pow( 1.0+3.89*q + pow(16.1*q, 2) + pow( 5.46*q, 3) + pow(6.71*q, 4), -0.25); p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T; } else if (user_params_ps->POWER_SPECTRUM == 2){ // Efstathiou,G., Bond,J.R., and White,S.D.M., MNRAS,258,1P (1992) gamma = 0.25; aa = 6.4/(cosmo_params_ps->hlittle*gamma); bb = 3.0/(cosmo_params_ps->hlittle*gamma); cc = 1.7/(cosmo_params_ps->hlittle*gamma); p = pow(k, cosmo_params_ps->POWER_INDEX) / pow( 1+pow( aa*k + pow(bb*k, 1.5) + pow(cc*k,2), 1.13), 2.0/1.13 ); } else if (user_params_ps->POWER_SPECTRUM == 3){ // Peebles, pg. 
626 gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb)/(cosmo_params_ps->OMm)); aa = 8.0 / (cosmo_params_ps->hlittle*gamma); bb = 4.7 / (cosmo_params_ps->hlittle*gamma); p = pow(k, cosmo_params_ps->POWER_INDEX) / pow(1 + aa*k + bb*k*k, 2); } else if (user_params_ps->POWER_SPECTRUM == 4){ // White, SDM and Frenk, CS, 1991, 379, 52 gamma = cosmo_params_ps->OMm * cosmo_params_ps->hlittle * pow(E, -(cosmo_params_ps->OMb) - (cosmo_params_ps->OMb)/(cosmo_params_ps->OMm)); aa = 1.7/(cosmo_params_ps->hlittle*gamma); bb = 9.0/pow(cosmo_params_ps->hlittle*gamma, 1.5); cc = 1.0/pow(cosmo_params_ps->hlittle*gamma, 2); p = pow(k, cosmo_params_ps->POWER_INDEX) * 19400.0 / pow(1 + aa*k + pow(bb*k, 1.5) + cc*k*k, 2); } else if (user_params_ps->POWER_SPECTRUM == 5){ // JBM: CLASS T = TF_CLASS(k, 1, 0); //read from z=0 output of CLASS p = pow(k, cosmo_params_ps->POWER_INDEX) * T * T; if(user_params_ps->USE_RELATIVE_VELOCITIES) { //jbm:Add average relvel suppression p *= 1.0 - A_VCB_PM*exp( -pow(log(k/KP_VCB_PM),2.0)/(2.0*SIGMAK_VCB_PM*SIGMAK_VCB_PM)); //for v=vrms } } else{ LOG_ERROR("No such power spectrum defined: %i. 
Output is bogus.", user_params_ps->POWER_SPECTRUM); Throw(ValueError); } double Radius; Radius = *(double *)params; // now get the value of the window function kR = k * Radius; if (global_params.FILTER == 0){ // top hat if ( (kR) < 1.0e-4 ){ w = 1.0; }// w converges to 1 as (kR) -> 0 else { w = 3.0 * (sin(kR)/pow(kR, 3) - cos(kR)/pow(kR, 2));} // now do d(w^2)/dm = 2 w dw/dr dr/dm if ( (kR) < 1.0e-10 ){ dwdr = 0;} else{ dwdr = 9*cos(kR)*k/pow(kR,3) + 3*sin(kR)*(1 - 3/(kR*kR))/(kR*Radius);} //3*k*( 3*cos(kR)/pow(kR,3) + sin(kR)*(-3*pow(kR, -4) + 1/(kR*kR)) );} // dwdr = -1e8 * k / (R*1e3); drdm = 1.0 / (4.0*PI * cosmo_params_ps->OMm*RHOcrit * Radius*Radius); } else if (global_params.FILTER == 1){ // gaussian of width 1/R w = pow(E, -kR*kR/2.0); dwdr = - k*kR * w; drdm = 1.0 / (pow(2*PI, 1.5) * cosmo_params_ps->OMm*RHOcrit * 3*Radius*Radius); } else { LOG_ERROR("No such filter: %i. Output is bogus.", global_params.FILTER); Throw(ValueError); } // return k*k*p*2*w*dwdr*drdm * d2fact; return k*k*p*2*w*dwdr*drdm; } double dsigmasqdm_z0(double M){ double result, error, lower_limit, upper_limit; gsl_function F; double rel_tol = FRACT_FLOAT_ERR*10; //<- relative tolerance gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000); double kstart, kend; double Radius; // R = MtoR(M); Radius = MtoR(M); // now lets do the integral for sigma and scale it with sigma_norm if(user_params_ps->POWER_SPECTRUM == 5){ kstart = fmax(1.0e-99/Radius, KBOT_CLASS); kend = fmin(350.0/Radius, KTOP_CLASS); }//we establish a maximum k of KTOP_CLASS~1e3 Mpc-1 and a minimum at KBOT_CLASS,~1e-5 Mpc-1 since the CLASS transfer function has a max! else{ kstart = 1.0e-99/Radius; kend = 350.0/Radius; } lower_limit = kstart;//log(kstart); upper_limit = kend;//log(kend); if (user_params_ps->POWER_SPECTRUM == 5){ // for CLASS we do not need to renormalize the sigma integral. 
d2fact=1.0; } else { d2fact = M*10000/sigma_z0(M); } F.function = &dsigmasq_dm; F.params = &Radius; int status; gsl_set_error_handler_off(); status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,1000, GSL_INTEG_GAUSS61, w, &result, &error); if(status!=0) { LOG_ERROR("gsl integration error occured!"); LOG_ERROR("(function argument): lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error); LOG_ERROR("data: M=%e",M); GSL_ERROR(status); } gsl_integration_workspace_free (w); // return sigma_norm * sigma_norm * result /d2fact; return sigma_norm * sigma_norm * result; } /* sheth correction to delta crit */ double sheth_delc(double del, double sig){ return sqrt(SHETH_a)*del*(1. + global_params.SHETH_b*pow(sig*sig/(SHETH_a*del*del), global_params.SHETH_c)); } /* FUNCTION dNdM_st(z, M) Computes the Press_schechter mass function with Sheth-Torman correction for ellipsoidal collapse at redshift z, and dark matter halo mass M (in solar masses). Uses interpolated sigma and dsigmadm to be computed faster. Necessary for mass-dependent ionising efficiencies. 
The return value is the number density per unit mass of halos in the mass range M to M+dM in units of: comoving Mpc^-3 Msun^-1 Reference: Sheth, Mo, Torman 2001 */ double dNdM_st(double growthf, double M){ double sigma, dsigmadm, nuhat; float MassBinLow; int MassBin; if(user_params_ps->USE_INTERPOLATION_TABLES) { MassBin = (int)floor( (log(M) - MinMass )*inv_mass_bin_width ); MassBinLow = MinMass + mass_bin_width*(float)MassBin; sigma = Sigma_InterpTable[MassBin] + ( log(M) - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width; dsigmadm = dSigmadm_InterpTable[MassBin] + ( log(M) - MassBinLow )*( dSigmadm_InterpTable[MassBin+1] - dSigmadm_InterpTable[MassBin] )*inv_mass_bin_width; dsigmadm = -pow(10.,dsigmadm); } else { sigma = sigma_z0(M); dsigmadm = dsigmasqdm_z0(M); } sigma = sigma * growthf; dsigmadm = dsigmadm * (growthf*growthf/(2.*sigma)); nuhat = sqrt(SHETH_a) * Deltac / sigma; return (-(cosmo_params_ps->OMm)*RHOcrit/M) * (dsigmadm/sigma) * sqrt(2./PI)*SHETH_A * (1+ pow(nuhat, -2*SHETH_p)) * nuhat * pow(E, -nuhat*nuhat/2.0); } /* FUNCTION dNdM_WatsonFOF(z, M) Computes the Press_schechter mass function with Warren et al. 2011 correction for ellipsoidal collapse at redshift z, and dark matter halo mass M (in solar masses). The Universial FOF function (Eq. 12) of Watson et al. 2013 The return value is the number density per unit mass of halos in the mass range M to M+dM in units of: comoving Mpc^-3 Msun^-1 Reference: Watson et al. 
2013 */ double dNdM_WatsonFOF(double growthf, double M){ double sigma, dsigmadm, f_sigma; float MassBinLow; int MassBin; if(user_params_ps->USE_INTERPOLATION_TABLES) { MassBin = (int)floor( (log(M) - MinMass )*inv_mass_bin_width ); MassBinLow = MinMass + mass_bin_width*(float)MassBin; sigma = Sigma_InterpTable[MassBin] + ( log(M) - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width; dsigmadm = dSigmadm_InterpTable[MassBin] + ( log(M) - MassBinLow )*( dSigmadm_InterpTable[MassBin+1] - dSigmadm_InterpTable[MassBin] )*inv_mass_bin_width; dsigmadm = -pow(10.,dsigmadm); } else { sigma = sigma_z0(M); dsigmadm = dsigmasqdm_z0(M); } sigma = sigma * growthf; dsigmadm = dsigmadm * (growthf*growthf/(2.*sigma)); f_sigma = Watson_A * ( pow( Watson_beta/sigma, Watson_alpha) + 1. ) * exp( - Watson_gamma/(sigma*sigma) ); return (-(cosmo_params_ps->OMm)*RHOcrit/M) * (dsigmadm/sigma) * f_sigma; } /* FUNCTION dNdM_WatsonFOF_z(z, M) Computes the Press_schechter mass function with Warren et al. 2011 correction for ellipsoidal collapse at redshift z, and dark matter halo mass M (in solar masses). The Universial FOF function, with redshift evolution (Eq. 12 - 15) of Watson et al. 2013. The return value is the number density per unit mass of halos in the mass range M to M+dM in units of: comoving Mpc^-3 Msun^-1 Reference: Watson et al. 
2013 */ double dNdM_WatsonFOF_z(double z, double growthf, double M){ double sigma, dsigmadm, A_z, alpha_z, beta_z, Omega_m_z, f_sigma; float MassBinLow; int MassBin; if(user_params_ps->USE_INTERPOLATION_TABLES) { MassBin = (int)floor( (log(M) - MinMass )*inv_mass_bin_width ); MassBinLow = MinMass + mass_bin_width*(float)MassBin; sigma = Sigma_InterpTable[MassBin] + ( log(M) - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width; dsigmadm = dSigmadm_InterpTable[MassBin] + ( log(M) - MassBinLow )*( dSigmadm_InterpTable[MassBin+1] - dSigmadm_InterpTable[MassBin] )*inv_mass_bin_width; dsigmadm = -pow(10.,dsigmadm); } else { sigma = sigma_z0(M); dsigmadm = dsigmasqdm_z0(M); } sigma = sigma * growthf; dsigmadm = dsigmadm * (growthf*growthf/(2.*sigma)); Omega_m_z = (cosmo_params_ps->OMm)*pow(1.+z,3.) / ( (cosmo_params_ps->OMl) + (cosmo_params_ps->OMm)*pow(1.+z,3.) + (global_params.OMr)*pow(1.+z,4.) ); A_z = Omega_m_z * ( Watson_A_z_1 * pow(1. + z, Watson_A_z_2 ) + Watson_A_z_3 ); alpha_z = Omega_m_z * ( Watson_alpha_z_1 * pow(1.+z, Watson_alpha_z_2 ) + Watson_alpha_z_3 ); beta_z = Omega_m_z * ( Watson_beta_z_1 * pow(1.+z, Watson_beta_z_2 ) + Watson_beta_z_3 ); f_sigma = A_z * ( pow(beta_z/sigma, alpha_z) + 1. ) * exp( - Watson_gamma_z/(sigma*sigma) ); return (-(cosmo_params_ps->OMm)*RHOcrit/M) * (dsigmadm/sigma) * f_sigma; } /* FUNCTION dNdM(growthf, M) Computes the Press_schechter mass function at redshift z (using the growth factor), and dark matter halo mass M (in solar masses). Uses interpolated sigma and dsigmadm to be computed faster. Necessary for mass-dependent ionising efficiencies. The return value is the number density per unit mass of halos in the mass range M to M+dM in units of: comoving Mpc^-3 Msun^-1 Reference: Padmanabhan, pg. 
214 */ double dNdM(double growthf, double M){ double sigma, dsigmadm; float MassBinLow; int MassBin; if(user_params_ps->USE_INTERPOLATION_TABLES) { MassBin = (int)floor( (log(M) - MinMass )*inv_mass_bin_width ); MassBinLow = MinMass + mass_bin_width*(float)MassBin; sigma = Sigma_InterpTable[MassBin] + ( log(M) - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width; dsigmadm = dSigmadm_InterpTable[MassBin] + ( log(M) - MassBinLow )*( dSigmadm_InterpTable[MassBin+1] - dSigmadm_InterpTable[MassBin] )*inv_mass_bin_width; dsigmadm = -pow(10.,dsigmadm); } else { sigma = sigma_z0(M); dsigmadm = dsigmasqdm_z0(M); } sigma = sigma * growthf; dsigmadm = dsigmadm * (growthf*growthf/(2.*sigma)); return (-(cosmo_params_ps->OMm)*RHOcrit/M) * sqrt(2/PI) * (Deltac/(sigma*sigma)) * dsigmadm * pow(E, -(Deltac*Deltac)/(2*sigma*sigma)); } /* FUNCTION FgtrM(z, M) Computes the fraction of mass contained in haloes with mass > M at redshift z */ double FgtrM(double z, double M){ double del, sig; del = Deltac/dicke(z); //regular spherical collapse delta sig = sigma_z0(M); return splined_erfc(del / (sqrt(2)*sig)); } /* FUNCTION FgtrM_wsigma(z, sigma_z0(M)) Computes the fraction of mass contained in haloes with mass > M at redshift z. 
 Requires sigma_z0(M) rather than M to make certain heating integrals faster
 */
double FgtrM_wsigma(double z, double sig){
    double del;

    del = Deltac/dicke(z); //regular spherical collapse delta

    return splined_erfc(del / (sqrt(2)*sig));
}

/*
 FUNCTION FgtrM_Watson(z, M)
 Computes the fraction of mass contained in haloes with mass > M at redshift z
 Uses Watson et al (2013) correction
 */
// Integrand for FgtrM_Watson_z: (dn/dM)*M in log-mass, hence the extra factor of M.
double dFdlnM_Watson_z (double lnM, void *params){
    struct parameters_gsl_FgtrM_int_ vals = *(struct parameters_gsl_FgtrM_int_ *)params;

    double M = exp(lnM);
    double z = vals.z_obs;
    double growthf = vals.gf_obs;

    return dNdM_WatsonFOF_z(z, growthf, M) * M * M;
}

// Collapsed fraction above M using the redshift-dependent Watson FOF mass function.
double FgtrM_Watson_z(double z, double growthf, double M){
    double result, error, lower_limit, upper_limit;
    gsl_function F;
    double rel_tol = 0.001; //<- relative tolerance
    gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000);

    F.function = &dFdlnM_Watson_z;

    struct parameters_gsl_FgtrM_int_ parameters_gsl_FgtrM = {
        .z_obs = z,
        .gf_obs = growthf,
    };

    F.params = &parameters_gsl_FgtrM;
    lower_limit = log(M);
    upper_limit = log(fmax(global_params.M_MAX_INTEGRAL, M*100));

    int status;
    gsl_set_error_handler_off();
    status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,
                                  1000, GSL_INTEG_GAUSS61, w, &result, &error);

    if(status!=0) {
        LOG_ERROR("gsl integration error occured!");
        LOG_ERROR("(function argument): lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
        LOG_ERROR("data: z=%e growthf=%e M=%e",z,growthf,M);
        GSL_ERROR(status);
    }

    gsl_integration_workspace_free (w);

    // Normalise by the mean matter density to get a mass fraction.
    return result / (cosmo_params_ps->OMm*RHOcrit);
}

/*
 FUNCTION FgtrM_Watson(z, M)
 Computes the fraction of mass contained in haloes with mass > M at redshift z
 Uses Watson et al (2013) correction
 */
// Integrand for FgtrM_Watson (redshift-independent Watson FOF fit).
double dFdlnM_Watson (double lnM, void *params){
    double growthf = *(double *)params;
    double M = exp(lnM);

    return dNdM_WatsonFOF(growthf, M) * M * M;
}

// Collapsed fraction above M using the universal Watson FOF mass function.
double FgtrM_Watson(double growthf, double M){
    double result, error, lower_limit, upper_limit;
    gsl_function F;
    double rel_tol = 0.001; //<- relative tolerance
    gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000);

    F.function = &dFdlnM_Watson;
    F.params = &growthf;
    lower_limit = log(M);
    upper_limit = log(fmax(global_params.M_MAX_INTEGRAL, M*100));

    int status;
    gsl_set_error_handler_off();
    status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,
                                  1000, GSL_INTEG_GAUSS61, w, &result, &error);

    if(status!=0) {
        LOG_ERROR("gsl integration error occured!");
        LOG_ERROR("lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
        LOG_ERROR("data: growthf=%e M=%e",growthf,M);
        GSL_ERROR(status);
    }

    gsl_integration_workspace_free (w);

    return result / (cosmo_params_ps->OMm*RHOcrit);
}

// Integrand for FgtrM_General: dispatches on the user-selected HMF.
// NOTE(review): MassFunction is left uninitialised if HMF is outside 0-3;
// callers guard the range before integrating — confirm no other entry points.
double dFdlnM_General(double lnM, void *params){
    struct parameters_gsl_FgtrM_int_ vals = *(struct parameters_gsl_FgtrM_int_ *)params;

    double M = exp(lnM);
    double z = vals.z_obs;
    double growthf = vals.gf_obs;

    double MassFunction;

    if(user_params_ps->HMF==0) {
        MassFunction = dNdM(growthf, M);
    }
    if(user_params_ps->HMF==1) {
        MassFunction = dNdM_st(growthf, M);
    }
    if(user_params_ps->HMF==2) {
        MassFunction = dNdM_WatsonFOF(growthf, M);
    }
    if(user_params_ps->HMF==3) {
        MassFunction = dNdM_WatsonFOF_z(z, growthf, M);
    }
    return MassFunction * M * M;
}

/*
 FUNCTION FgtrM_General(z, M)
 Computes the fraction of mass contained in haloes with mass > M at redshift z
 */
double FgtrM_General(double z, double M){
    double del, sig, growthf;
    int status;

    growthf = dicke(z);

    struct parameters_gsl_FgtrM_int_ parameters_gsl_FgtrM = {
        .z_obs = z,
        .gf_obs = growthf,
    };

    if(user_params_ps->HMF<4 && user_params_ps->HMF>-1) {

        double result, error, lower_limit, upper_limit;
        gsl_function F;
        double rel_tol = 0.001; //<- relative tolerance
        gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000);

        F.function = &dFdlnM_General;
        F.params = &parameters_gsl_FgtrM;

        lower_limit = log(M);
        upper_limit =
log(fmax(global_params.M_MAX_INTEGRAL, M*100));

        gsl_set_error_handler_off();
        status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,
                                      1000, GSL_INTEG_GAUSS61, w, &result, &error);

        if(status!=0) {
            LOG_ERROR("gsl integration error occured!");
            LOG_ERROR("lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
            LOG_ERROR("data: z=%e growthf=%e M=%e",z,growthf,M);
            GSL_ERROR(status);
        }

        gsl_integration_workspace_free (w);

        // Normalise by the mean matter density to get a mass fraction.
        return result / (cosmo_params_ps->OMm*RHOcrit);
    }
    else {
        LOG_ERROR("Incorrect HMF selected: %i (should be between 0 and 3).", user_params_ps->HMF);
        Throw(ValueError);
    }
}

/*
 Integrand for Nion_General: ionising-emissivity-weighted mass function in
 log-mass. Star-formation and escape fractions are power laws in M, capped at
 unity via the Mlim_* masses; an exponential duty cycle exp(-Mturn/M) is applied.
 */
double dNion_General(double lnM, void *params){
    struct parameters_gsl_SFR_General_int_ vals = *(struct parameters_gsl_SFR_General_int_ *)params;

    double M = exp(lnM);
    double z = vals.z_obs;
    double growthf = vals.gf_obs;
    double MassTurnover = vals.Mdrop;
    double Alpha_star = vals.pl_star;
    double Alpha_esc = vals.pl_esc;
    double Fstar10 = vals.frac_star;
    double Fesc10 = vals.frac_esc;
    double Mlim_Fstar = vals.LimitMass_Fstar;
    double Mlim_Fesc = vals.LimitMass_Fesc;

    double Fstar, Fesc, MassFunction;

    // Cap f_star at 1 beyond the limiting mass (power law pivots at 1e10 Msun).
    if (Alpha_star > 0. && M > Mlim_Fstar)
        Fstar = 1./Fstar10;
    else if (Alpha_star < 0. && M < Mlim_Fstar)
        Fstar = 1/Fstar10;
    else
        Fstar = pow(M/1e10,Alpha_star);

    // Same capping for the escape fraction.
    if (Alpha_esc > 0. && M > Mlim_Fesc)
        Fesc = 1./Fesc10;
    else if (Alpha_esc < 0. && M < Mlim_Fesc)
        Fesc = 1./Fesc10;
    else
        Fesc = pow(M/1e10,Alpha_esc);

    if(user_params_ps->HMF==0) {
        MassFunction = dNdM(growthf, M);
    }
    if(user_params_ps->HMF==1) {
        MassFunction = dNdM_st(growthf,M);
    }
    if(user_params_ps->HMF==2) {
        MassFunction = dNdM_WatsonFOF(growthf, M);
    }
    if(user_params_ps->HMF==3) {
        MassFunction = dNdM_WatsonFOF_z(z, growthf, M);
    }

    return MassFunction * M * M * exp(-MassTurnover/M) * Fstar * Fesc;
}

/*
 Integrates dNion_General over log-mass from M_Min up to
 max(M_MAX_INTEGRAL, 100*M_Min), returning the ionising emissivity
 normalised by the mean matter density.
 */
double Nion_General(double z, double M_Min, double MassTurnover, double Alpha_star, double Alpha_esc,
                    double Fstar10, double Fesc10, double Mlim_Fstar, double Mlim_Fesc){

    double growthf;

    growthf = dicke(z);

    double result, error, lower_limit, upper_limit;
    gsl_function F;
    double rel_tol = 0.001; //<- relative tolerance
    gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000);

    struct parameters_gsl_SFR_General_int_ parameters_gsl_SFR = {
        .z_obs = z,
        .gf_obs = growthf,
        .Mdrop = MassTurnover,
        .pl_star = Alpha_star,
        .pl_esc = Alpha_esc,
        .frac_star = Fstar10,
        .frac_esc = Fesc10,
        .LimitMass_Fstar = Mlim_Fstar,
        .LimitMass_Fesc = Mlim_Fesc,
    };
    int status;

    if(user_params_ps->HMF<4 && user_params_ps->HMF>-1) {

        F.function = &dNion_General;
        F.params = &parameters_gsl_SFR;

        lower_limit = log(M_Min);
        upper_limit = log(fmax(global_params.M_MAX_INTEGRAL, M_Min*100));

        gsl_set_error_handler_off();
        status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,
                                      1000, GSL_INTEG_GAUSS61, w, &result, &error);

        if(status!=0) {
            LOG_ERROR("gsl integration error occured!");
            LOG_ERROR("(function argument): lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
            LOG_ERROR("data: z=%e growthf=%e MassTurnover=%e Alpha_star=%e Alpha_esc=%e",z,growthf,MassTurnover,Alpha_star,Alpha_esc);
            LOG_ERROR("data: Fstar10=%e Fesc10=%e Mlim_Fstar=%e Mlim_Fesc=%e",Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc);
            GSL_ERROR(status);
        }

        gsl_integration_workspace_free (w);

        return result / ((cosmo_params_ps->OMm)*RHOcrit);
    }
    else {
        LOG_ERROR("Incorrect HMF selected: %i (should be between 0 and 3).", user_params_ps->HMF);
        Throw(ValueError);
    }
}

/*
 MINI-halo (mol. cooling) analogue of dNion_General: power-law pivot at 1e7 Msun
 and an additional upper turnover exp(-M/Mturn_upper) for atomic-cooling haloes.
 */
double dNion_General_MINI(double lnM, void *params){
    struct parameters_gsl_SFR_General_int_ vals = *(struct parameters_gsl_SFR_General_int_ *)params;

    double M = exp(lnM);
    double z = vals.z_obs;
    double growthf = vals.gf_obs;
    double MassTurnover = vals.Mdrop;
    double MassTurnover_upper = vals.Mdrop_upper;
    double Alpha_star = vals.pl_star;
    double Alpha_esc = vals.pl_esc;
    double Fstar7_MINI = vals.frac_star;
    double Fesc7_MINI = vals.frac_esc;
    double Mlim_Fstar = vals.LimitMass_Fstar;
    double Mlim_Fesc = vals.LimitMass_Fesc;

    double Fstar, Fesc, MassFunction;

    if (Alpha_star > 0. && M > Mlim_Fstar)
        Fstar = 1./Fstar7_MINI;
    else if (Alpha_star < 0. && M < Mlim_Fstar)
        Fstar = 1/Fstar7_MINI;
    else
        Fstar = pow(M/1e7,Alpha_star);

    if (Alpha_esc > 0. && M > Mlim_Fesc)
        Fesc = 1./Fesc7_MINI;
    else if (Alpha_esc < 0. && M < Mlim_Fesc)
        Fesc = 1./Fesc7_MINI;
    else
        Fesc = pow(M/1e7,Alpha_esc);

    if(user_params_ps->HMF==0) {
        MassFunction = dNdM(growthf, M);
    }
    if(user_params_ps->HMF==1) {
        MassFunction = dNdM_st(growthf,M);
    }
    if(user_params_ps->HMF==2) {
        MassFunction = dNdM_WatsonFOF(growthf, M);
    }
    if(user_params_ps->HMF==3) {
        MassFunction = dNdM_WatsonFOF_z(z, growthf, M);
    }

    return MassFunction * M * M * exp(-MassTurnover/M) * exp(-M/MassTurnover_upper) * Fstar * Fesc;
}

/*
 Integrates dNion_General_MINI over log-mass; MINI-halo analogue of
 Nion_General with both lower and upper mass turnovers.
 */
double Nion_General_MINI(double z, double M_Min, double MassTurnover, double MassTurnover_upper, double Alpha_star,
                         double Alpha_esc, double Fstar7_MINI, double Fesc7_MINI, double Mlim_Fstar, double Mlim_Fesc){

    double growthf;
    int status;

    growthf = dicke(z);

    double result, error, lower_limit, upper_limit;
    gsl_function F;
    double rel_tol = 0.001; //<- relative tolerance
    gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000);

    struct parameters_gsl_SFR_General_int_ parameters_gsl_SFR = {
        .z_obs = z,
        .gf_obs = growthf,
        .Mdrop = MassTurnover,
        .Mdrop_upper = MassTurnover_upper,
        .pl_star = Alpha_star,
        .pl_esc = Alpha_esc,
        .frac_star = Fstar7_MINI,
        .frac_esc = Fesc7_MINI,
        .LimitMass_Fstar = Mlim_Fstar,
        .LimitMass_Fesc = Mlim_Fesc,
    };

    if(user_params_ps->HMF<4 && user_params_ps->HMF>-1) {

        F.function = &dNion_General_MINI;
        F.params = &parameters_gsl_SFR;

        lower_limit = log(M_Min);
        upper_limit = log(fmax(global_params.M_MAX_INTEGRAL, M_Min*100));

        gsl_set_error_handler_off();
        status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol,
                                      1000, GSL_INTEG_GAUSS61, w, &result, &error);

        if(status!=0) {
            LOG_ERROR("gsl integration error occurred!");
            LOG_ERROR("lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
            LOG_ERROR("data: z=%e growthf=%e MassTurnover=%e MassTurnover_upper=%e",z,growthf,MassTurnover,MassTurnover_upper);
            LOG_ERROR("data: Alpha_star=%e Alpha_esc=%e Fstar7_MINI=%e Fesc7_MINI=%e Mlim_Fstar=%e Mlim_Fesc=%e",Alpha_star,Alpha_esc,Fstar7_MINI,Fesc7_MINI,Mlim_Fstar,Mlim_Fesc);
            GSL_ERROR(status);
        }

        gsl_integration_workspace_free (w);

        // Normalise by the mean matter density.
        return result / ((cosmo_params_ps->OMm)*RHOcrit);
    }
    else {
        LOG_ERROR("Incorrect HMF selected: %i (should be between 0 and 3).", user_params_ps->HMF);
        Throw(ValueError);
    }
}

/*
 returns the "effective Jeans mass" in Msun
 corresponding to the gas analog of WDM ; eq. 10 in Barkana+ 2001
 */
double M_J_WDM(){
    double z_eq, fudge=60;
    if (!(global_params.P_CUTOFF))
        return 0;
    z_eq = 3600*(cosmo_params_ps->OMm-cosmo_params_ps->OMb)*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle/0.15;
    return fudge*3.06e8 * (1.5/global_params.g_x) * sqrt((cosmo_params_ps->OMm-cosmo_params_ps->OMb)*cosmo_params_ps->hlittle*cosmo_params_ps->hlittle/0.15) * pow(global_params.M_WDM, -4) * pow(z_eq/3000.0, 1.5);
}

/*
 Rational-approximation complementary error function (Numerical-Recipes style
 Chebyshev fit); uses erfc(-x) = 2 - erfc(x) for negative arguments.
 */
float erfcc(float x)
{
    double t,q,ans;

    q=fabs(x);
    t=1.0/(1.0+0.5*q);
    ans=t*exp(-q*q-1.2655122+t*(1.0000237+t*(0.374092+t*(0.0967842+
        t*(-0.1862881+t*(0.2788681+t*(-1.13520398+t*(1.4885159+
        t*(-0.82215223+t*0.17087277)))))))));
    return x >= 0.0 ?
ans : 2.0-ans;
}

/*
 erfc wrapper: clamps negative arguments to 1 and currently always evaluates
 the direct approximation — the spline path below is deliberately unreachable.
 */
double splined_erfc(double x){
    if (x < 0){
        return 1.0;
    }

    // TODO: This could be wrapped in a Try/Catch to try the fast way and if it doesn't
    // work, use the slow way.
    return erfcc(x); // the interpolation below doesn't seem to be stable in Ts.c
    if (x > ERFC_PARAM_DELTA*(ERFC_NPTS-1))
        return erfcc(x);
    else
        return exp(gsl_spline_eval(erfc_spline, x, erfc_acc));
}

void gauleg(float x1, float x2, float x[], float w[], int n)
//Given the lower and upper limits of integration x1 and x2, and given n, this routine returns arrays x[1..n] and w[1..n] of length n,
//containing the abscissas and weights of the Gauss- Legendre n-point quadrature formula.
{
    int m,j,i;
    double z1,z,xm,xl,pp,p3,p2,p1;

    m=(n+1)/2;
    xm=0.5*(x2+x1);
    xl=0.5*(x2-x1);
    for (i=1;i<=m;i++) {
        //High precision is a good idea for this routine.
        //The roots are symmetric in the interval, so we only have to find half of them.
        //Loop over the desired roots.

        z=cos(3.141592654*(i-0.25)/(n+0.5));

        //Starting with the above approximation to the ith root, we enter the main loop of refinement by Newton’s method.
        do {
            p1=1.0;
            p2=0.0;
            for (j=1;j<=n;j++) {
                //Loop up the recurrence relation to get the Legendre polynomial evaluated at z.
                p3=p2;
                p2=p1;
                p1=((2.0*j-1.0)*z*p2-(j-1.0)*p3)/j;
            }
            //p1 is now the desired Legendre polynomial. We next compute pp, its derivative, by a standard relation involving also p2,
            //the polynomial of one lower order.
pp=n*(z*p1-p2)/(z*z-1.0); z1=z; z=z1-p1/pp; } while (fabs(z-z1) > EPS2); x[i]=xm-xl*z; x[n+1-i]=xm+xl*z; w[i]=2.0*xl/((1.0-z*z)*pp*pp); w[n+1-i]=w[i]; } } void initialiseSigmaMInterpTable(float M_Min, float M_Max) { int i; float Mass; if (Mass_InterpTable == NULL){ Mass_InterpTable = calloc(NMass,sizeof(float)); Sigma_InterpTable = calloc(NMass,sizeof(float)); dSigmadm_InterpTable = calloc(NMass,sizeof(float)); } #pragma omp parallel shared(Mass_InterpTable,Sigma_InterpTable,dSigmadm_InterpTable) private(i) num_threads(user_params_ps->N_THREADS) { #pragma omp for for(i=0;i<NMass;i++) { Mass_InterpTable[i] = log(M_Min) + (float)i/(NMass-1)*( log(M_Max) - log(M_Min) ); Sigma_InterpTable[i] = sigma_z0(exp(Mass_InterpTable[i])); dSigmadm_InterpTable[i] = log10(-dsigmasqdm_z0(exp(Mass_InterpTable[i]))); } } for(i=0;i<NMass;i++) { if(isfinite(Mass_InterpTable[i]) == 0 || isfinite(Sigma_InterpTable[i]) == 0 || isfinite(dSigmadm_InterpTable[i])==0) { LOG_ERROR("Detected either an infinite or NaN value in initialiseSigmaMInterpTable"); // Throw(ParameterError); Throw(TableGenerationError); } } MinMass = log(M_Min); mass_bin_width = 1./(NMass-1)*( log(M_Max) - log(M_Min) ); inv_mass_bin_width = 1./mass_bin_width; } void freeSigmaMInterpTable() { free(Mass_InterpTable); free(Sigma_InterpTable); free(dSigmadm_InterpTable); Mass_InterpTable = NULL; } void nrerror(char error_text[]) { LOG_ERROR("Numerical Recipes run-time error..."); LOG_ERROR("%s",error_text); Throw(MemoryAllocError); } float *vector(long nl, long nh) /* allocate a float vector with subscript range v[nl..nh] */ { float *v; v = (float *)malloc((size_t) ((nh-nl+1+NR_END)*sizeof(float))); if(!v) nrerror("allocation failure in vector()"); return v - nl + NR_END; } void free_vector(float *v, long nl, long nh) /* free a float vector allocated with vector() */ { free((FREE_ARG) (v+nl-NR_END)); } void spline(float x[], float y[], int n, float yp1, float ypn, float y2[]) /*Given arrays x[1..n] and y[1..n] containing a 
tabulated function, i.e., yi = f(xi), with x1 <x2 < :: : < xN, and given values yp1 and ypn for the first derivative of the interpolating
function at points 1 and n, respectively, this routine returns an array y2[1..n] that contains
the second derivatives of the interpolating function at the tabulated points xi. If yp1 and/or
ypn are equal to 1e30 or larger, the routine is signaled to set the corresponding boundary
condition for a natural spline, with zero second derivative on that boundary.*/
{
    int i,k;
    float p,qn,sig,un,*u;
    int na,nb,check;
    u=vector(1,n-1);
    if (yp1 > 0.99e30)                     // The lower boundary condition is set either to be "natural"
        y2[1]=u[1]=0.0;
    else {                                 // or else to have a specified first derivative.
        y2[1] = -0.5;
        u[1]=(3.0/(x[2]-x[1]))*((y[2]-y[1])/(x[2]-x[1])-yp1);
    }
    for (i=2;i<=n-1;i++) {                 //This is the decomposition loop of the tridiagonal algorithm.
        sig=(x[i]-x[i-1])/(x[i+1]-x[i-1]); //y2 and u are used for temporary
        // Widen the stencil while neighbouring abscissas coincide (at float
        // precision) to avoid a division by zero; alternate sides each step.
        na = 1;
        nb = 1;
        check = 0;
        while(((float)(x[i+na*1]-x[i-nb*1])==(float)0.0)) {
            check = check + 1;
            if(check%2==0) {
                na = na + 1;
            }
            else {
                nb = nb + 1;
            }
            sig=(x[i]-x[i-1])/(x[i+na*1]-x[i-nb*1]);
        }
        p=sig*y2[i-1]+2.0;                 //storage of the decomposed
        y2[i]=(sig-1.0)/p;                 // factors.
        u[i]=(y[i+1]-y[i])/(x[i+1]-x[i]) - (y[i]-y[i-1])/(x[i]-x[i-1]);
        u[i]=(6.0*u[i]/(x[i+1]-x[i-1])-sig*u[i-1])/p;
        // Same degenerate-abscissa guard for the u[i] update.
        if(((float)(x[i+1]-x[i])==(float)0.0) || ((float)(x[i]-x[i-1])==(float)0.0)) {
            na = 0;
            nb = 0;
            check = 0;
            while((float)(x[i+na*1]-x[i-nb])==(float)(0.0) || ((float)(x[i+na]-x[i-nb*1])==(float)0.0)) {
                check = check + 1;
                if(check%2==0) {
                    na = na + 1;
                }
                else {
                    nb = nb + 1;
                }
            }
            u[i]=(y[i+1]-y[i])/(x[i+na*1]-x[i-nb]) - (y[i]-y[i-1])/(x[i+na]-x[i-nb*1]);
            u[i]=(6.0*u[i]/(x[i+na*1]-x[i-nb*1])-sig*u[i-1])/p;
        }
    }
    if (ypn > 0.99e30)                     //The upper boundary condition is set either to be "natural"
        qn=un=0.0;
    else {                                 //or else to have a specified first derivative.
        qn=0.5;
        un=(3.0/(x[n]-x[n-1]))*(ypn-(y[n]-y[n-1])/(x[n]-x[n-1]));
    }
    y2[n]=(un-qn*u[n-1])/(qn*y2[n-1]+1.0);

    for (k=n-1;k>=1;k--) {                 //This is the backsubstitution loop of the tridiagonal
        y2[k]=y2[k]*y2[k+1]+u[k];          //algorithm.
    }
    free_vector(u,1,n-1);
}

void splint(float xa[], float ya[], float y2a[], int n, float x, float *y)
/*Given the arrays xa[1..n] and ya[1..n], which tabulate a function (with the xai's in order),
and given the array y2a[1..n], which is the output from spline above, and given a value of
x, this routine returns a cubic-spline interpolated value y.*/
{
    void nrerror(char error_text[]);
    int klo,khi,k;
    float h,b,a;
    klo=1;                                 // We will find the right place in the table by means of
    khi=n;                                 //bisection. This is optimal if sequential calls to this
    while (khi-klo > 1) {                  //routine are at random values of x. If sequential calls
        k=(khi+klo) >> 1;                  //are in order, and closely spaced, one would do better
        if (xa[k] > x) khi=k;              //to store previous values of klo and khi and test if
        else klo=k;                        //they remain appropriate on the next call.
    }                                      // klo and khi now bracket the input value of x.
    h=xa[khi]-xa[klo];
    if (h == 0.0) nrerror("Bad xa input to routine splint"); //The xa's must be distinct.
    a=(xa[khi]-x)/h;
    b=(x-xa[klo])/h;                       //Cubic spline polynomial is now evaluated.
*y=a*ya[klo]+b*ya[khi]+((a*a*a-a)*y2a[klo]+(b*b*b-b)*y2a[khi])*(h*h)/6.0; } unsigned long *lvector(long nl, long nh) /* allocate an unsigned long vector with subscript range v[nl..nh] */ { unsigned long *v; v = (unsigned long *)malloc((size_t) ((nh-nl+1+NR_END)*sizeof(long))); if(!v) nrerror("allocation failure in lvector()"); return v - nl + NR_END; } void free_lvector(unsigned long *v, long nl, long nh) /* free an unsigned long vector allocated with lvector() */ { free((FREE_ARG) (v+nl-NR_END)); } /* dnbiasdM */ double dnbiasdM(double M, float z, double M_o, float del_o){ double sigsq, del, sig_one, sig_o; if ((M_o-M) < TINY){ LOG_ERROR("In function dnbiasdM: M must be less than M_o!\nAborting...\n"); Throw(ValueError); } del = Deltac/dicke(z) - del_o; if (del < 0){ LOG_ERROR(" In function dnbiasdM: del_o must be less than del_1 = del_crit/dicke(z)!\nAborting...\n"); Throw(ValueError); } sig_o = sigma_z0(M_o); sig_one = sigma_z0(M); sigsq = sig_one*sig_one - sig_o*sig_o; return -(RHOcrit*cosmo_params_ps->OMm)/M /sqrt(2*PI) *del*pow(sigsq,-1.5)*pow(E, -0.5*del*del/sigsq)*dsigmasqdm_z0(M); } /* calculates the fraction of mass contained in haloes with mass > M at redshift z, in regions with a linear overdensity of del_bias, and standard deviation sig_bias */ double FgtrM_bias(double z, double M, double del_bias, double sig_bias){ double del, sig, sigsmallR; sigsmallR = sigma_z0(M); if (!(sig_bias < sigsmallR)){ // biased region is smaller that halo! // fprintf(stderr, "FgtrM_bias: Biased region is smaller than halo!\nResult is bogus.\n"); // return 0; return 0.000001; } del = Deltac/dicke(z) - del_bias; sig = sqrt(sigsmallR*sigsmallR - sig_bias*sig_bias); return splined_erfc(del / (sqrt(2)*sig)); } /* Uses sigma parameters instead of Mass for scale */ double sigmaparam_FgtrM_bias(float z, float sigsmallR, float del_bias, float sig_bias){ double del, sig; if (!(sig_bias < sigsmallR)){ // biased region is smaller that halo! 
// fprintf(stderr, "local_FgtrM_bias: Biased region is smaller than halo!\nResult is bogus.\n"); // return 0; return 0.000001; } del = Deltac/dicke(z) - del_bias; sig = sqrt(sigsmallR*sigsmallR - sig_bias*sig_bias); return splined_erfc(del / (sqrt(2)*sig)); } /* redshift derivative of the growth function at z */ double ddicke_dz(double z){ float dz = 1e-10; double omegaM_z, ddickdz, dick_0, x, x_0, domegaMdz; return (dicke(z+dz)-dicke(z))/dz; } /* compute a mass limit where the stellar baryon fraction and the escape fraction exceed unity */ float Mass_limit (float logM, float PL, float FRAC) { return FRAC*pow(pow(10.,logM)/1e10,PL); } void bisection(float *x, float xlow, float xup, int *iter){ *x=(xlow + xup)/2.; ++(*iter); } float Mass_limit_bisection(float Mmin, float Mmax, float PL, float FRAC){ int i, iter, max_iter=200; float rel_tol=0.001; float logMlow, logMupper, x, x1; iter = 0; logMlow = log10(Mmin); logMupper = log10(Mmax); if (PL < 0.) { if (Mass_limit(logMlow,PL,FRAC) <= 1.) { return Mmin; } } else if (PL > 0.) { if (Mass_limit(logMupper,PL,FRAC) <= 1.) { return Mmax; } } else return 0; bisection(&x, logMlow, logMupper, &iter); do { if((Mass_limit(logMlow,PL,FRAC)-1.)*(Mass_limit(x,PL,FRAC)-1.) < 0.) logMupper = x; else logMlow = x; bisection(&x1, logMlow, logMupper, &iter); if(fabs(x1-x) < rel_tol) { return pow(10.,x1); } x = x1; } while(iter < max_iter); // Got to max_iter without finding a solution. 
    LOG_ERROR("Failed to find a mass limit to regulate stellar fraction/escape fraction is between 0 and 1.");
    LOG_ERROR(" The solution does not converge or iterations are not sufficient.");
//    Throw(ParameterError);
    Throw(MassDepZetaError);

    return(0.0);
}

/*
 Allocates the luminosity-function working arrays and spline, broadcasts the
 parameter structs to this module, initialises the power spectrum and the
 sigma(M) tables. Returns 0 on success, or the caught error status.
 NOTE(review): called on every ComputeLF invocation; the arrays allocated here
 are released by cleanup_ComputeLF at the end of ComputeLF.
 */
int initialise_ComputeLF(int nbins, struct UserParams *user_params, struct CosmoParams *cosmo_params, struct AstroParams *astro_params,
                         struct FlagOptions *flag_options) {

    Broadcast_struct_global_PS(user_params,cosmo_params);
    Broadcast_struct_global_UF(user_params,cosmo_params);

    lnMhalo_param = calloc(nbins,sizeof(double));
    Muv_param = calloc(nbins,sizeof(double));
    Mhalo_param = calloc(nbins,sizeof(double));

    LF_spline_acc = gsl_interp_accel_alloc();
    LF_spline = gsl_spline_alloc(gsl_interp_cspline, nbins);

    init_ps();

    int status;
    Try initialiseSigmaMInterpTable(0.999*Mhalo_min,1.001*Mhalo_max);
    Catch(status) {
        LOG_ERROR("\t...called from initialise_ComputeLF");
        return(status);
    }

    initialised_ComputeLF = true;
    return(0);
}

/* Frees everything allocated by initialise_ComputeLF. */
void cleanup_ComputeLF(){
    free(lnMhalo_param);
    free(Muv_param);
    free(Mhalo_param);
    gsl_spline_free (LF_spline);
    gsl_interp_accel_free(LF_spline_acc);
    freeSigmaMInterpTable();
    initialised_ComputeLF = 0;
}

/*
 Computes UV luminosity functions (log10 phi vs Muv vs Mhalo) for each
 requested redshift, for either the atomic-cooling (component==1) or
 MINI-halo population. Fills M_uv_z, M_h_z and log10phi (each
 NUM_OF_REDSHIFT_FOR_LF x nbins, flattened).
 */
int ComputeLF(int nbins, struct UserParams *user_params, struct CosmoParams *cosmo_params, struct AstroParams *astro_params,
               struct FlagOptions *flag_options, int component, int NUM_OF_REDSHIFT_FOR_LF, float *z_LF, float *M_TURNs, double *M_uv_z, double *M_h_z, double *log10phi) {
    /*
        This is an API-level function and thus returns an int status.
    */
    int status;
    Try{ // This try block covers the whole function.
        // This NEEDS to be done every time, because the actual object passed in as
        // user_params, cosmo_params etc. can change on each call, freeing up the memory.
        initialise_ComputeLF(nbins, user_params,cosmo_params,astro_params,flag_options);

        int i,i_z;
        int i_unity, i_smth, mf, nbins_smth=7;
        double dlnMhalo, lnMhalo_i, SFRparam, Muv_1, Muv_2, dMuvdMhalo;
        double Mhalo_i, lnMhalo_min, lnMhalo_max, lnMhalo_lo, lnMhalo_hi, dlnM, growthf;
        double f_duty_upper, Mcrit_atom;
        float Fstar, Fstar_temp;
        double dndm;
        int gsl_status;

        gsl_set_error_handler_off();
        if (astro_params->ALPHA_STAR < -0.5)
            LOG_WARNING(
                "ALPHA_STAR is %f, which is unphysical value given the observational LFs.\n"\
                "Also, when ALPHA_STAR < -.5, LFs may show a kink. It is recommended to set ALPHA_STAR > -0.5.",
                astro_params->ALPHA_STAR
            );

        mf = user_params_ps->HMF;

        lnMhalo_min = log(Mhalo_min*0.999);
        lnMhalo_max = log(Mhalo_max*1.001);
        dlnMhalo = (lnMhalo_max - lnMhalo_min)/(double)(nbins - 1);

        for (i_z=0; i_z<NUM_OF_REDSHIFT_FOR_LF; i_z++) {

            growthf = dicke(z_LF[i_z]);
            Mcrit_atom = atomic_cooling_threshold(z_LF[i_z]);

            i_unity = -1;
            for (i=0; i<nbins; i++) {
                // generate interpolation arrays
                lnMhalo_param[i] = lnMhalo_min + dlnMhalo*(double)i;
                Mhalo_i = exp(lnMhalo_param[i]);

                // Power-law f_star, pivot 1e10 (atomic) or 1e7 (MINI), capped at 1.
                if (component == 1)
                    Fstar = astro_params->F_STAR10*pow(Mhalo_i/1e10,astro_params->ALPHA_STAR);
                else
                    Fstar = astro_params->F_STAR7_MINI*pow(Mhalo_i/1e7,astro_params->ALPHA_STAR_MINI);
                if (Fstar > 1.) Fstar = 1;

                if (i_unity < 0) { // Find the array number at which Fstar crosses unity.
                    if (astro_params->ALPHA_STAR > 0.) {
                        if ( (1.- Fstar) < FRACT_FLOAT_ERR ) i_unity = i;
                    }
                    else if (astro_params->ALPHA_STAR < 0. && i < nbins-1) {
                        if (component == 1)
                            Fstar_temp = astro_params->F_STAR10*pow( exp(lnMhalo_min + dlnMhalo*(double)(i+1))/1e10,astro_params->ALPHA_STAR);
                        else
                            Fstar_temp = astro_params->F_STAR7_MINI*pow( exp(lnMhalo_min + dlnMhalo*(double)(i+1))/1e7,astro_params->ALPHA_STAR_MINI);
                        if (Fstar_temp < 1. && (1.- Fstar) < FRACT_FLOAT_ERR) i_unity = i;
                    }
                }

                // parametrization of SFR
                SFRparam = Mhalo_i * cosmo_params->OMb/cosmo_params->OMm * (double)Fstar * (double)(hubble(z_LF[i_z])*SperYR/astro_params->t_STAR); // units of M_solar/year

                Muv_param[i] = 51.63 - 2.5*log10(SFRparam*Luv_over_SFR); // UV magnitude
                // except if Muv value is nan or inf, but avoid error put the value as 10.
                if ( isinf(Muv_param[i]) || isnan(Muv_param[i]) ) Muv_param[i] = 10.;

                M_uv_z[i + i_z*nbins] = Muv_param[i];
            }

            gsl_status = gsl_spline_init(LF_spline, lnMhalo_param, Muv_param, nbins);
            GSL_ERROR(gsl_status);

            lnMhalo_lo = log(Mhalo_min);
            lnMhalo_hi = log(Mhalo_max);
            dlnM = (lnMhalo_hi - lnMhalo_lo)/(double)(nbins - 1);

            // There is a kink on LFs at which Fstar crosses unity. This kink is a numerical artefact caused by the derivate of dMuvdMhalo.
            // Most of the cases the kink doesn't appear in magnitude ranges we are interested (e.g. -22 < Muv < -10). However, for some extreme
            // parameters, it appears. To avoid this kink, we use the interpolation of the derivate in the range where the kink appears.
            // 'i_unity' is the array number at which the kink appears. 'i_unity-3' and 'i_unity+12' are related to the range of interpolation,
            // which is an arbitrary choice.
            // NOTE: This method does NOT work in cases with ALPHA_STAR < -0.5. But, this parameter range is unphysical given that the
            // observational LFs favour positive ALPHA_STAR in this model.
            // i_smth = 0: calculates LFs without interpolation.
            // i_smth = 1: calculates LFs using interpolation where Fstar crosses unity.
            if (i_unity-3 < 0) i_smth = 0;
            else if (i_unity+12 > nbins-1) i_smth = 0;
            else i_smth = 1;

            if (i_smth == 0) {
                for (i=0; i<nbins; i++) {
                    // calculate luminosity function
                    lnMhalo_i = lnMhalo_lo + dlnM*(double)i;
                    Mhalo_param[i] = exp(lnMhalo_i);

                    M_h_z[i + i_z*nbins] = Mhalo_param[i];

                    // Central finite difference of Muv w.r.t. halo mass via the spline.
                    Muv_1 = gsl_spline_eval(LF_spline, lnMhalo_i - delta_lnMhalo, LF_spline_acc);
                    Muv_2 = gsl_spline_eval(LF_spline, lnMhalo_i + delta_lnMhalo, LF_spline_acc);

                    dMuvdMhalo = (Muv_2 - Muv_1) / (2.*delta_lnMhalo * exp(lnMhalo_i));

                    if (component == 1)
                        f_duty_upper = 1.;
                    else
                        f_duty_upper = exp(-(Mhalo_param[i]/Mcrit_atom));

                    if(mf==0) {
                        log10phi[i + i_z*nbins] = log10( dNdM(growthf, exp(lnMhalo_i)) * exp(-(M_TURNs[i_z]/Mhalo_param[i])) * f_duty_upper / fabs(dMuvdMhalo) );
                    }
                    else if(mf==1) {
                        log10phi[i + i_z*nbins] = log10( dNdM_st(growthf, exp(lnMhalo_i)) * exp(-(M_TURNs[i_z]/Mhalo_param[i])) * f_duty_upper / fabs(dMuvdMhalo) );
                    }
                    else if(mf==2) {
                        log10phi[i + i_z*nbins] = log10( dNdM_WatsonFOF(growthf, exp(lnMhalo_i)) * exp(-(M_TURNs[i_z]/Mhalo_param[i])) * f_duty_upper / fabs(dMuvdMhalo) );
                    }
                    else if(mf==3) {
                        log10phi[i + i_z*nbins] = log10( dNdM_WatsonFOF_z(z_LF[i_z], growthf, exp(lnMhalo_i)) * exp(-(M_TURNs[i_z]/Mhalo_param[i])) * f_duty_upper / fabs(dMuvdMhalo) );
                    }
                    else{
                        LOG_ERROR("HMF should be between 0-3, got %d", mf);
                        Throw(ValueError);
                    }

                    if (isinf(log10phi[i + i_z*nbins]) || isnan(log10phi[i + i_z*nbins]) || log10phi[i + i_z*nbins] < -30.)
                        log10phi[i + i_z*nbins] = -30.;
                }
            }
            else {
                // NOTE(review): these buffers appear to be (re-)calloc'd each
                // redshift iteration without a matching free — looks like a
                // leak when NUM_OF_REDSHIFT_FOR_LF > 1; confirm against the
                // globals' lifetime before changing.
                lnM_temp = calloc(nbins_smth,sizeof(double));
                deriv_temp = calloc(nbins_smth,sizeof(double));
                deriv = calloc(nbins,sizeof(double));

                for (i=0; i<nbins; i++) {
                    // calculate luminosity function
                    lnMhalo_i = lnMhalo_lo + dlnM*(double)i;
                    Mhalo_param[i] = exp(lnMhalo_i);

                    M_h_z[i + i_z*nbins] = Mhalo_param[i];

                    Muv_1 = gsl_spline_eval(LF_spline, lnMhalo_i - delta_lnMhalo, LF_spline_acc);
                    Muv_2 = gsl_spline_eval(LF_spline, lnMhalo_i + delta_lnMhalo, LF_spline_acc);

                    dMuvdMhalo = (Muv_2 - Muv_1) / (2.*delta_lnMhalo * exp(lnMhalo_i));
                    deriv[i] = fabs(dMuvdMhalo);
                }

                deriv_spline_acc = gsl_interp_accel_alloc();
                deriv_spline = gsl_spline_alloc(gsl_interp_cspline, nbins_smth);

                // generate interpolation arrays to smooth discontinuity of the derivative causing a kink
                // Note that the number of array elements and the range of interpolation are made by arbitrary choices.
                lnM_temp[0] = lnMhalo_param[i_unity - 3];
                lnM_temp[1] = lnMhalo_param[i_unity - 2];
                lnM_temp[2] = lnMhalo_param[i_unity + 8];
                lnM_temp[3] = lnMhalo_param[i_unity + 9];
                lnM_temp[4] = lnMhalo_param[i_unity + 10];
                lnM_temp[5] = lnMhalo_param[i_unity + 11];
                lnM_temp[6] = lnMhalo_param[i_unity + 12];

                deriv_temp[0] = deriv[i_unity - 3];
                deriv_temp[1] = deriv[i_unity - 2];
                deriv_temp[2] = deriv[i_unity + 8];
                deriv_temp[3] = deriv[i_unity + 9];
                deriv_temp[4] = deriv[i_unity + 10];
                deriv_temp[5] = deriv[i_unity + 11];
                deriv_temp[6] = deriv[i_unity + 12];

                gsl_status = gsl_spline_init(deriv_spline, lnM_temp, deriv_temp, nbins_smth);
                GSL_ERROR(gsl_status);

                // Overwrite the kink region with the smoothed derivative.
                for (i=0;i<9;i++){
                    deriv[i_unity + i - 1] = gsl_spline_eval(deriv_spline, lnMhalo_param[i_unity + i - 1], deriv_spline_acc);
                }

                for (i=0; i<nbins; i++) {
                    if (component == 1)
                        f_duty_upper = 1.;
                    else
                        f_duty_upper = exp(-(Mhalo_param[i]/Mcrit_atom));

                    if(mf==0)
                        dndm = dNdM(growthf, Mhalo_param[i]);
                    else if(mf==1)
                        dndm = dNdM_st(growthf, Mhalo_param[i]);
                    else if(mf==2)
                        dndm = dNdM_WatsonFOF(growthf, Mhalo_param[i]);
                    else if(mf==3)
                        dndm = dNdM_WatsonFOF_z(z_LF[i_z], growthf, Mhalo_param[i]);
                    else{
                        LOG_ERROR("HMF should be between 0-3, got %d", mf);
                        Throw(ValueError);
                    }

                    log10phi[i + i_z*nbins] = log10(dndm * exp(-(M_TURNs[i_z]/Mhalo_param[i])) * f_duty_upper / deriv[i]);

                    if (isinf(log10phi[i + i_z*nbins]) || isnan(log10phi[i + i_z*nbins]) || log10phi[i + i_z*nbins] < -30.)
                        log10phi[i + i_z*nbins] = -30.;
                }
            }
        }

        cleanup_ComputeLF();
    } // End try

    Catch(status){
        return status;
    }
    return(0);
}

void initialiseGL_Nion_Xray(int n, float M_Min, float M_Max){
    //calculates the weightings and the positions for Gauss-Legendre quadrature.
    gauleg(log(M_Min),log(M_Max),xi_SFR_Xray,wi_SFR_Xray,n);
}

/*
 Conditional (extended Press-Schechter) mass function for progenitors of
 log-mass M1 inside a region of log-mass M2 with overdensities delta1/delta2
 and variance sigma2. M1, M2 are in ln(M); sigma2 is sigma(M2).
 */
float dNdM_conditional(float growthf, float M1, float M2, float delta1, float delta2, float sigma2){

    float sigma1, dsigmadm,dsigma_val;
    float MassBinLow;
    int MassBin;

    if(user_params_ps->USE_INTERPOLATION_TABLES) {
        MassBin = (int)floor( (M1 - MinMass )*inv_mass_bin_width );
        MassBinLow = MinMass + mass_bin_width*(float)MassBin;

        sigma1 = Sigma_InterpTable[MassBin] + ( M1 - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;

        dsigma_val = dSigmadm_InterpTable[MassBin] + ( M1 - MassBinLow )*( dSigmadm_InterpTable[MassBin+1] - dSigmadm_InterpTable[MassBin] )*inv_mass_bin_width;
        dsigmadm = -pow(10.,dsigma_val);
    }
    else {
        sigma1 = sigma_z0(exp(M1));
        dsigmadm = dsigmasqdm_z0(exp(M1));
    }

    M1 = exp(M1);
    M2 = exp(M2);

    sigma1 = sigma1*sigma1;
    sigma2 = sigma2*sigma2;

    dsigmadm = dsigmadm/(2.0*sigma1); // This is actually sigma1^{2} as calculated above, however, it should just be sigma1. It cancels with the same factor below. Why I have decided to write it like that I don't know!
if((sigma1 > sigma2)) { return -(( delta1 - delta2 )/growthf)*( 2.*sigma1*dsigmadm )*( exp( - ( delta1 - delta2 )*( delta1 - delta2 )/( 2.*growthf*growthf*( sigma1 - sigma2 ) ) ) )/(pow( sigma1 - sigma2, 1.5)); } else if(sigma1==sigma2) { return -(( delta1 - delta2 )/growthf)*( 2.*sigma1*dsigmadm )*( exp( - ( delta1 - delta2 )*( delta1 - delta2 )/( 2.*growthf*growthf*( 1.e-6 ) ) ) )/(pow( 1.e-6, 1.5)); } else { return 0.; } } void initialiseGL_Nion(int n, float M_Min, float M_Max){ //calculates the weightings and the positions for Gauss-Legendre quadrature. gauleg(log(M_Min),log(M_Max),xi_SFR,wi_SFR,n); } double dNion_ConditionallnM_MINI(double lnM, void *params) { struct parameters_gsl_SFR_con_int_ vals = *(struct parameters_gsl_SFR_con_int_ *)params; double M = exp(lnM); // linear scale double growthf = vals.gf_obs; double M2 = vals.Mval; // natural log scale double sigma2 = vals.sigma2; double del1 = vals.delta1; double del2 = vals.delta2; double MassTurnover = vals.Mdrop; double MassTurnover_upper = vals.Mdrop_upper; double Alpha_star = vals.pl_star; double Alpha_esc = vals.pl_esc; double Fstar7_MINI = vals.frac_star; double Fesc7_MINI = vals.frac_esc; double Mlim_Fstar = vals.LimitMass_Fstar; double Mlim_Fesc = vals.LimitMass_Fesc; double Fstar,Fesc; if (Alpha_star > 0. && M > Mlim_Fstar) Fstar = 1./Fstar7_MINI; else if (Alpha_star < 0. && M < Mlim_Fstar) Fstar = 1./Fstar7_MINI; else Fstar = pow(M/1e7,Alpha_star); if (Alpha_esc > 0. && M > Mlim_Fesc) Fesc = 1./Fesc7_MINI; else if (Alpha_esc < 0. 
&& M < Mlim_Fesc) Fesc = 1./Fesc7_MINI; else Fesc = pow(M/1e7,Alpha_esc); return M*exp(-MassTurnover/M)*exp(-M/MassTurnover_upper)*Fstar*Fesc*dNdM_conditional(growthf,log(M),M2,del1,del2,sigma2)/sqrt(2.*PI); } double dNion_ConditionallnM(double lnM, void *params) { struct parameters_gsl_SFR_con_int_ vals = *(struct parameters_gsl_SFR_con_int_ *)params; double M = exp(lnM); // linear scale double growthf = vals.gf_obs; double M2 = vals.Mval; // natural log scale double sigma2 = vals.sigma2; double del1 = vals.delta1; double del2 = vals.delta2; double MassTurnover = vals.Mdrop; double Alpha_star = vals.pl_star; double Alpha_esc = vals.pl_esc; double Fstar10 = vals.frac_star; double Fesc10 = vals.frac_esc; double Mlim_Fstar = vals.LimitMass_Fstar; double Mlim_Fesc = vals.LimitMass_Fesc; double Fstar,Fesc; if (Alpha_star > 0. && M > Mlim_Fstar) Fstar = 1./Fstar10; else if (Alpha_star < 0. && M < Mlim_Fstar) Fstar = 1./Fstar10; else Fstar = pow(M/1e10,Alpha_star); if (Alpha_esc > 0. && M > Mlim_Fesc) Fesc = 1./Fesc10; else if (Alpha_esc < 0. && M < Mlim_Fesc) Fesc = 1./Fesc10; else Fesc = pow(M/1e10,Alpha_esc); return M*exp(-MassTurnover/M)*Fstar*Fesc*dNdM_conditional(growthf,log(M),M2,del1,del2,sigma2)/sqrt(2.*PI); } double Nion_ConditionalM_MINI(double growthf, double M1, double M2, double sigma2, double delta1, double delta2, double MassTurnover, double MassTurnover_upper, double Alpha_star, double Alpha_esc, double Fstar10, double Fesc10, double Mlim_Fstar, double Mlim_Fesc, bool FAST_FCOLL_TABLES) { if (FAST_FCOLL_TABLES) { //JBM: Fast tables. Assume sharp Mturn, not exponential cutoff. 
return GaussLegendreQuad_Nion_MINI(0, 0, (float) growthf, (float) M2, (float) sigma2, (float) delta1, (float) delta2, (float) MassTurnover, (float) MassTurnover_upper, (float) Alpha_star, (float) Alpha_esc, (float) Fstar10, (float) Fesc10, (float) Mlim_Fstar, (float) Mlim_Fesc, FAST_FCOLL_TABLES);
    }
    else{ //standard old code
        double result, error, lower_limit, upper_limit;
        gsl_function F;
        double rel_tol = 0.01; //<- relative tolerance
        gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000);

        struct parameters_gsl_SFR_con_int_ parameters_gsl_SFR_con = {
            .gf_obs = growthf,
            .Mval = M2,
            .sigma2 = sigma2,
            .delta1 = delta1,
            .delta2 = delta2,
            .Mdrop = MassTurnover,
            .Mdrop_upper = MassTurnover_upper,
            .pl_star = Alpha_star,
            .pl_esc = Alpha_esc,
            .frac_star = Fstar10,
            .frac_esc = Fesc10,
            .LimitMass_Fstar = Mlim_Fstar,
            .LimitMass_Fesc = Mlim_Fesc
        };
        int status;
        F.function = &dNion_ConditionallnM_MINI;
        F.params = &parameters_gsl_SFR_con;
        lower_limit = M1;
        upper_limit = M2;

        // Disable GSL's abort-on-error handler so a failed integration can be
        // logged with its full parameter set before raising.
        gsl_set_error_handler_off();

        status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol, 1000, GSL_INTEG_GAUSS61, w, &result, &error);

        if(status!=0) {
            LOG_ERROR("gsl integration error occured!");
            LOG_ERROR("(function argument): lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
            LOG_ERROR("data: growthf=%e M2=%e sigma2=%e delta1=%e delta2=%e MassTurnover=%e",growthf,M2,sigma2,delta1,delta2,MassTurnover);
            LOG_ERROR("data: MassTurnover_upper=%e Alpha_star=%e Alpha_esc=%e Fstar10=%e Fesc10=%e Mlim_Fstar=%e Mlim_Fesc=%e",MassTurnover_upper,Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc);
            GSL_ERROR(status);
        }

        gsl_integration_workspace_free (w);

        // NOTE(review): fully-collapsed check happens after integrating, so the
        // integral above is computed and then discarded when delta2 > delta1.
        if(delta2 > delta1) {
            result = 1.;
            return result;
        }
        else {
            return result;
        }
    }
}

// Conditional Nion for atomically-cooled galaxies: integrates
// dNion_ConditionallnM over ln(M) in [M1, M2] with GSL QAG, or uses the fast
// Fcoll approximation when FAST_FCOLL_TABLES and USE_FAST_ATOMIC are enabled.
// Returns 1 when delta2 > delta1 (fully collapsed region).
double Nion_ConditionalM(double growthf, double M1, double M2, double sigma2, double delta1, double delta2, double MassTurnover, double Alpha_star, double Alpha_esc, double Fstar10, double Fesc10, double Mlim_Fstar, double Mlim_Fesc, bool FAST_FCOLL_TABLES) {

    if (FAST_FCOLL_TABLES && global_params.USE_FAST_ATOMIC) { //JBM: Fast tables. Assume sharp Mturn, not exponential cutoff.
        return GaussLegendreQuad_Nion(0, 0, (float) growthf, (float) M2, (float) sigma2, (float) delta1, (float) delta2, (float) MassTurnover, (float) Alpha_star, (float) Alpha_esc, (float) Fstar10, (float) Fesc10, (float) Mlim_Fstar, (float) Mlim_Fesc, FAST_FCOLL_TABLES);
    }
    else{ //standard
        double result, error, lower_limit, upper_limit;
        gsl_function F;
        double rel_tol = 0.01; //<- relative tolerance
        gsl_integration_workspace * w = gsl_integration_workspace_alloc (1000);

        struct parameters_gsl_SFR_con_int_ parameters_gsl_SFR_con = {
            .gf_obs = growthf,
            .Mval = M2,
            .sigma2 = sigma2,
            .delta1 = delta1,
            .delta2 = delta2,
            .Mdrop = MassTurnover,
            .pl_star = Alpha_star,
            .pl_esc = Alpha_esc,
            .frac_star = Fstar10,
            .frac_esc = Fesc10,
            .LimitMass_Fstar = Mlim_Fstar,
            .LimitMass_Fesc = Mlim_Fesc
        };

        F.function = &dNion_ConditionallnM;
        F.params = &parameters_gsl_SFR_con;
        lower_limit = M1;
        upper_limit = M2;

        int status;
        // Handle GSL failures manually (log + raise) instead of aborting.
        gsl_set_error_handler_off();

        status = gsl_integration_qag (&F, lower_limit, upper_limit, 0, rel_tol, 1000, GSL_INTEG_GAUSS61, w, &result, &error);

        if(status!=0) {
            LOG_ERROR("gsl integration error occured!");
            LOG_ERROR("(function argument): lower_limit=%e upper_limit=%e rel_tol=%e result=%e error=%e",lower_limit,upper_limit,rel_tol,result,error);
            LOG_ERROR("data: growthf=%e M1=%e M2=%e sigma2=%e delta1=%e delta2=%e",growthf,M1,M2,sigma2,delta1,delta2);
            LOG_ERROR("data: MassTurnover=%e Alpha_star=%e Alpha_esc=%e Fstar10=%e Fesc10=%e Mlim_Fstar=%e Mlim_Fesc=%e",MassTurnover,Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc);
            GSL_ERROR(status);
        }

        gsl_integration_workspace_free (w);

        // NOTE(review): same post-integration collapse cap as the _MINI variant.
        if(delta2 > delta1) {
            result = 1.;
            return result;
        }
        else {
            return result;
        }
    }
}

// Gauss-Legendre integrand for minihaloes (float version used by the GL
// quadrature loops): takes the parameter struct by value rather than through
// a GSL void* pointer.
float Nion_ConditionallnM_GL_MINI(float lnM, struct parameters_gsl_SFR_con_int_ parameters_gsl_SFR_con){
    float M = exp(lnM);
    float growthf =
parameters_gsl_SFR_con.gf_obs;
    float M2 = parameters_gsl_SFR_con.Mval;
    float sigma2 = parameters_gsl_SFR_con.sigma2;
    float del1 = parameters_gsl_SFR_con.delta1;
    float del2 = parameters_gsl_SFR_con.delta2;
    float MassTurnover = parameters_gsl_SFR_con.Mdrop;
    float MassTurnover_upper = parameters_gsl_SFR_con.Mdrop_upper;
    float Alpha_star = parameters_gsl_SFR_con.pl_star;
    float Alpha_esc = parameters_gsl_SFR_con.pl_esc;
    float Fstar7_MINI = parameters_gsl_SFR_con.frac_star;
    float Fesc7_MINI = parameters_gsl_SFR_con.frac_esc;
    float Mlim_Fstar = parameters_gsl_SFR_con.LimitMass_Fstar;
    float Mlim_Fesc = parameters_gsl_SFR_con.LimitMass_Fesc;

    float Fstar,Fesc;

    // Clipped power-law stellar fraction, pivot at 1e7 Msun (minihalo scale).
    if (Alpha_star > 0. && M > Mlim_Fstar)
        Fstar = 1./Fstar7_MINI;
    else if (Alpha_star < 0. && M < Mlim_Fstar)
        Fstar = 1./Fstar7_MINI;
    else
        Fstar = pow(M/1e7,Alpha_star);

    // Clipped power-law escape fraction, pivot at 1e7 Msun.
    if (Alpha_esc > 0. && M > Mlim_Fesc)
        Fesc = 1./Fesc7_MINI;
    else if (Alpha_esc < 0. && M < Mlim_Fesc)
        Fesc = 1./Fesc7_MINI;
    else
        Fesc = pow(M/1e7,Alpha_esc);

    // Same integrand form as dNion_ConditionallnM_MINI, in single precision.
    return M*exp(-MassTurnover/M)*exp(-M/MassTurnover_upper)*Fstar*Fesc*dNdM_conditional(growthf,log(M),M2,del1,del2,sigma2)/sqrt(2.*PI);
}

// Gauss-Legendre integrand for atomically-cooled galaxies (float version of
// dNion_ConditionallnM): parameter struct passed by value, pivot at 1e10 Msun,
// lower exponential turnover only.
float Nion_ConditionallnM_GL(float lnM, struct parameters_gsl_SFR_con_int_ parameters_gsl_SFR_con){
    float M = exp(lnM);
    float growthf = parameters_gsl_SFR_con.gf_obs;
    float M2 = parameters_gsl_SFR_con.Mval;
    float sigma2 = parameters_gsl_SFR_con.sigma2;
    float del1 = parameters_gsl_SFR_con.delta1;
    float del2 = parameters_gsl_SFR_con.delta2;
    float MassTurnover = parameters_gsl_SFR_con.Mdrop;
    float Alpha_star = parameters_gsl_SFR_con.pl_star;
    float Alpha_esc = parameters_gsl_SFR_con.pl_esc;
    float Fstar10 = parameters_gsl_SFR_con.frac_star;
    float Fesc10 = parameters_gsl_SFR_con.frac_esc;
    float Mlim_Fstar = parameters_gsl_SFR_con.LimitMass_Fstar;
    float Mlim_Fesc = parameters_gsl_SFR_con.LimitMass_Fesc;

    float Fstar,Fesc;

    // Clipped power-law stellar fraction, pivot at 1e10 Msun.
    if (Alpha_star > 0. && M > Mlim_Fstar)
        Fstar = 1./Fstar10;
    else if (Alpha_star < 0. && M < Mlim_Fstar)
        Fstar = 1./Fstar10;
    else
        Fstar = pow(M/1e10,Alpha_star);

    // Clipped power-law escape fraction, pivot at 1e10 Msun.
    if (Alpha_esc > 0. && M > Mlim_Fesc)
        Fesc = 1./Fesc10;
    else if (Alpha_esc < 0. && M < Mlim_Fesc)
        Fesc = 1./Fesc10;
    else
        Fesc = pow(M/1e10,Alpha_esc);

    return M*exp(-MassTurnover/M)*Fstar*Fesc*dNdM_conditional(growthf,log(M),M2,del1,del2,sigma2)/sqrt(2.*PI);
}

//JBM: Same as above but for minihaloes. Has two cutoffs, lower and upper.
// Type selects the abscissa/weight set (1: X-ray tables, 0: SFR tables); n is
// the quadrature order. When FAST_FCOLL_TABLES is set it instead evaluates a
// 3-power-law analytic Fcoll approximation between the two turnover masses.
float GaussLegendreQuad_Nion_MINI(int Type, int n, float growthf, float M2, float sigma2, float delta1, float delta2, float MassTurnover, float MassTurnover_upper, float Alpha_star, float Alpha_esc, float Fstar7_MINI, float Fesc7_MINI, float Mlim_Fstar_MINI, float Mlim_Fesc_MINI, bool FAST_FCOLL_TABLES) {

    double result, nu_lower_limit, nu_higher_limit, nupivot;
    int i;

    double integrand, x;
    integrand = 0.;

    struct parameters_gsl_SFR_con_int_ parameters_gsl_SFR_con = {
        .gf_obs = growthf,
        .Mval = M2,
        .sigma2 = sigma2,
        .delta1 = delta1,
        .delta2 = delta2,
        .Mdrop = MassTurnover,
        .Mdrop_upper = MassTurnover_upper,
        .pl_star = Alpha_star,
        .pl_esc = Alpha_esc,
        .frac_star = Fstar7_MINI,
        .frac_esc = Fesc7_MINI,
        .LimitMass_Fstar = Mlim_Fstar_MINI,
        .LimitMass_Fesc = Mlim_Fesc_MINI
    };

    // Near-collapse: cap the result at unity.
    if(delta2 > delta1*0.9999) {
        result = 1.;
        return result;
    }

    if(FAST_FCOLL_TABLES){ //JBM: Fast tables. Assume sharp Mturn, not exponential cutoff.
if(MassTurnover_upper <= MassTurnover){
            return 1e-40; //in sharp cut it's zero
        }

        double delta_arg = pow( (delta1 - delta2)/growthf , 2.);

        // Interpolate sigma(M) at the lower turnover mass from the precomputed
        // Sigma_InterpTable (log-mass bins of width mass_bin_width).
        double LogMass=log(MassTurnover);
        int MassBin = (int)floor( (LogMass - MinMass )*inv_mass_bin_width );
        double MassBinLow = MinMass + mass_bin_width*(double)MassBin;
        double sigmaM1 = Sigma_InterpTable[MassBin] + ( LogMass - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
        nu_lower_limit = delta_arg/(sigmaM1 * sigmaM1 - sigma2 * sigma2);

        // Same interpolation at the upper turnover mass.
        LogMass = log(MassTurnover_upper);
        MassBin = (int)floor( (LogMass - MinMass )*inv_mass_bin_width );
        MassBinLow = MinMass + mass_bin_width*(double)MassBin;
        double sigmaM2 = Sigma_InterpTable[MassBin] + ( LogMass - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
        nu_higher_limit = delta_arg/(sigmaM2*sigmaM2-sigma2*sigma2);

        //note we keep nupivot1 just in case very negative delta makes it reach that nu
        LogMass = log(MPIVOT1); //jbm could be done outside and it'd be even faster
        int MassBinpivot = (int)floor( (LogMass - MinMass )*inv_mass_bin_width );
        double MassBinLowpivot = MinMass + mass_bin_width*(double)MassBinpivot;
        double sigmapivot1 = Sigma_InterpTable[MassBinpivot] + ( LogMass - MassBinLowpivot )*( Sigma_InterpTable[MassBinpivot+1] - Sigma_InterpTable[MassBinpivot] )*inv_mass_bin_width;
        double nupivot1 = delta_arg/(sigmapivot1*sigmapivot1); //note, it does not have the sigma2 on purpose.

        LogMass = log(MPIVOT2); //jbm could be done outside and it'd be even faster
        MassBinpivot = (int)floor( (LogMass - MinMass )*inv_mass_bin_width );
        MassBinLowpivot = MinMass + mass_bin_width*(double)MassBinpivot;
        double sigmapivot2 = Sigma_InterpTable[MassBinpivot] + ( LogMass - MassBinLowpivot )*( Sigma_InterpTable[MassBinpivot+1] - Sigma_InterpTable[MassBinpivot] )*inv_mass_bin_width;
        double nupivot2 = delta_arg/(sigmapivot2*sigmapivot2);

        double beta1 = (Alpha_star+Alpha_esc) * AINDEX1 * (0.5); //exponent for Fcollapprox for nu>nupivot1 (large M)
        double beta2 = (Alpha_star+Alpha_esc) * AINDEX2 * (0.5); //exponent for Fcollapprox for nupivot1>nu>nupivot2 (small M)
        double beta3 = (Alpha_star+Alpha_esc) * AINDEX3 * (0.5); //exponent for Fcollapprox for nu<nupivot2 (smallest M)
        //beta2 fixed by continuity.

        // // 3PLs
        double fcollres=0.0;
        double fcollres_high=0.0; //for the higher threshold to subtract

        // re-written for further speedups
        if (nu_higher_limit <= nupivot2){ //if both are below pivot2 don't bother adding and subtracting the high contribution
            fcollres=(Fcollapprox(nu_lower_limit,beta3))*pow(nupivot2,-beta3);
            fcollres_high=(Fcollapprox(nu_higher_limit,beta3))*pow(nupivot2,-beta3);
        }
        else {
            fcollres_high=(Fcollapprox(nu_higher_limit,beta2))*pow(nupivot1,-beta2);
            if (nu_lower_limit > nupivot2){
                fcollres=(Fcollapprox(nu_lower_limit,beta2))*pow(nupivot1,-beta2);
            }
            else {
                // Lower limit falls below pivot2: stitch the beta2 and beta3
                // power-law segments together.
                fcollres=(Fcollapprox(nupivot2,beta2))*pow(nupivot1,-beta2);
                fcollres+=(Fcollapprox(nu_lower_limit,beta3)-Fcollapprox(nupivot2,beta3) )*pow(nupivot2,-beta3);
            }
        }
        // Guard against a negative difference; floor at an effective zero.
        if (fcollres < fcollres_high){
            return 1e-40;
        }
        return (fcollres-fcollres_high);
    }
    else{
        // Standard n-point Gauss-Legendre sum over the precomputed nodes.
        for(i=1; i<(n+1); i++){
            if(Type==1) {
                x = xi_SFR_Xray[i];
                integrand += wi_SFR_Xray[i]*Nion_ConditionallnM_GL_MINI(x,parameters_gsl_SFR_con);
            }
            if(Type==0) {
                x = xi_SFR[i];
                integrand += wi_SFR[i]*Nion_ConditionallnM_GL_MINI(x,parameters_gsl_SFR_con);
            }
        }
        return integrand;
    }
}

//JBM: Added the approximation if user_params->FAST_FCOLL_TABLES==True
float
GaussLegendreQuad_Nion(int Type, int n, float growthf, float M2, float sigma2, float delta1, float delta2, float MassTurnover, float Alpha_star, float Alpha_esc, float Fstar10, float Fesc10, float Mlim_Fstar, float Mlim_Fesc, bool FAST_FCOLL_TABLES) {
    //Performs the Gauss-Legendre quadrature.
    int i;

    double result, nu_lower_limit, nupivot;

    // Near-collapse: cap the result at unity before doing any work.
    if(delta2 > delta1*0.9999) {
        result = 1.;
        return result;
    }

    double integrand, x;
    integrand = 0.;

    struct parameters_gsl_SFR_con_int_ parameters_gsl_SFR_con = {
        .gf_obs = growthf,
        .Mval = M2,
        .sigma2 = sigma2,
        .delta1 = delta1,
        .delta2 = delta2,
        .Mdrop = MassTurnover,
        .pl_star = Alpha_star,
        .pl_esc = Alpha_esc,
        .frac_star = Fstar10,
        .frac_esc = Fesc10,
        .LimitMass_Fstar = Mlim_Fstar,
        .LimitMass_Fesc = Mlim_Fesc
    };

    if (FAST_FCOLL_TABLES && global_params.USE_FAST_ATOMIC){ //JBM: Fast tables. Assume sharp Mturn, not exponential cutoff.

        double delta_arg = pow( (delta1 - delta2)/growthf , 2.0);

        // Interpolate sigma(M) at the turnover mass from Sigma_InterpTable.
        double LogMass=log(MassTurnover);
        int MassBin = (int)floor( (LogMass - MinMass )*inv_mass_bin_width );
        double MassBinLow = MinMass + mass_bin_width*(double)MassBin;
        double sigmaM1 = Sigma_InterpTable[MassBin] + ( LogMass - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;
        nu_lower_limit = delta_arg/(sigmaM1*sigmaM1-sigma2*sigma2);

        LogMass = log(MPIVOT1); //jbm could be done outside and it'd be even faster
        int MassBinpivot = (int)floor( (LogMass - MinMass )*inv_mass_bin_width );
        double MassBinLowpivot = MinMass + mass_bin_width*(double)MassBinpivot;
        double sigmapivot1 = Sigma_InterpTable[MassBinpivot] + ( LogMass - MassBinLowpivot )*( Sigma_InterpTable[MassBinpivot+1] - Sigma_InterpTable[MassBinpivot] )*inv_mass_bin_width;
        double nupivot1 = delta_arg/(sigmapivot1*sigmapivot1); //note, it does not have the sigma2 on purpose.

        LogMass = log(MPIVOT2); //jbm could be done outside and it'd be even faster
        MassBinpivot = (int)floor( (LogMass - MinMass )*inv_mass_bin_width );
        MassBinLowpivot = MinMass + mass_bin_width*(double)MassBinpivot;
        double sigmapivot2 = Sigma_InterpTable[MassBinpivot] + ( LogMass - MassBinLowpivot )*( Sigma_InterpTable[MassBinpivot+1] - Sigma_InterpTable[MassBinpivot] )*inv_mass_bin_width;
        double nupivot2 = delta_arg/(sigmapivot2*sigmapivot2);

        double beta1 = (Alpha_star+Alpha_esc) * AINDEX1 * (0.5); //exponent for Fcollapprox for nu>nupivot1 (large M)
        double beta2 = (Alpha_star+Alpha_esc) * AINDEX2 * (0.5); //exponent for Fcollapprox for nupivot2<nu<nupivot1 (small M)
        double beta3 = (Alpha_star+Alpha_esc) * AINDEX3 * (0.5); //exponent for Fcollapprox for nu<nupivot2 (smallest M)
        //beta2 fixed by continuity.

        double nucrit_sigma2 = delta_arg*pow(sigma2+1e-10,-2.0); //above this nu sigma2>sigma1, so HMF=0. eps added to avoid infinities

        // // 3PLs
        double fcollres=0.0;
        if(nu_lower_limit >= nucrit_sigma2){ //fully in the flat part of sigma(nu), M^alpha is nu-independent.
            return 1e-40;
        }
        else{ //we subtract the contribution from high nu, since the HMF is set to 0 if sigma2>sigma1
            fcollres -= Fcollapprox(nucrit_sigma2,beta1)*pow(nupivot1,-beta1);
        }

        if(nu_lower_limit >= nupivot1){
            fcollres+=Fcollapprox(nu_lower_limit,beta1)*pow(nupivot1,-beta1);
        }
        else{
            // Stitch the three power-law segments at the two nu pivots.
            fcollres+=Fcollapprox(nupivot1,beta1)*pow(nupivot1,-beta1);
            if (nu_lower_limit > nupivot2){
                fcollres+=(Fcollapprox(nu_lower_limit,beta2)-Fcollapprox(nupivot1,beta2))*pow(nupivot1,-beta2);
            }
            else {
                fcollres+=(Fcollapprox(nupivot2,beta2)-Fcollapprox(nupivot1,beta2) )*pow(nupivot1,-beta2);
                fcollres+=(Fcollapprox(nu_lower_limit,beta3)-Fcollapprox(nupivot2,beta3) )*pow(nupivot2,-beta3);
            }
        }
        if (fcollres<=0.0){
            LOG_DEBUG("Negative fcoll? fc=%.1le Mt=%.1le \n",fcollres, MassTurnover);
            fcollres=1e-40;
        }
        return fcollres;
    }
    else{
        // Standard n-point Gauss-Legendre sum over the precomputed nodes.
        for(i=1; i<(n+1); i++){
            if(Type==1) {
                x = xi_SFR_Xray[i];
                integrand += wi_SFR_Xray[i]*Nion_ConditionallnM_GL(x,parameters_gsl_SFR_con);
            }
            if(Type==0) {
                x = xi_SFR[i];
                integrand += wi_SFR[i]*Nion_ConditionallnM_GL(x,parameters_gsl_SFR_con);
            }
        }
        return integrand;
    }
}

#include <gsl/gsl_sf_gamma.h>

//JBM: Integral of a power-law times exponential for EPS: \int dnu nu^beta * exp(-nu/2)/sqrt(nu) from numin to infty.
double Fcollapprox (double numin, double beta){
    //nu is deltacrit^2/sigma^2, corrected by delta(R) and sigma(R)
    // Closed form via the upper incomplete gamma function Gamma(0.5+beta, numin/2).
    double gg = gsl_sf_gamma_inc(0.5+beta,0.5*numin);
    return gg*pow(2,0.5+beta)*pow(2.0*PI,-0.5);
}

// Build the conditional-Nion interpolation tables in overdensity at redshift z:
// log10_Nion_spline over the low-density range (log10(1+delta) grid, GL
// quadrature) and Nion_spline over the high-density range (linear grid,
// direct conditional integration). Results are validated for NaN/inf.
void initialise_Nion_General_spline(float z, float min_density, float max_density, float Mmax, float MassTurnover, float Alpha_star, float Alpha_esc, float Fstar10, float Fesc10, float Mlim_Fstar, float Mlim_Fesc, bool FAST_FCOLL_TABLES){

    float Mmin = MassTurnover/50.;
    double overdense_val, growthf, sigma2;
    double overdense_large_high = Deltac, overdense_large_low = global_params.CRIT_DENS_TRANSITION*0.999;
    double overdense_small_high, overdense_small_low;
    int i;
    float ln_10;

    // Low-density grid tops out at the critical-density transition.
    if(max_density > global_params.CRIT_DENS_TRANSITION*1.001) {
        overdense_small_high = global_params.CRIT_DENS_TRANSITION*1.001;
    }
    else {
        overdense_small_high = max_density;
    }
    overdense_small_low = min_density;

    ln_10 = log(10);

    float MassBinLow;
    int MassBin;

    growthf = dicke(z);

    Mmin = log(Mmin);
    Mmax = log(Mmax);

    // Interpolate sigma(Mmax) from the precomputed table.
    MassBin = (int)floor( ( Mmax - MinMass )*inv_mass_bin_width );
    MassBinLow = MinMass + mass_bin_width*(float)MassBin;

    sigma2 = Sigma_InterpTable[MassBin] + ( Mmax - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;

#pragma omp parallel shared(log10_overdense_spline_SFR,log10_Nion_spline,overdense_small_low,overdense_small_high,growthf,Mmax,sigma2,MassTurnover,Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc) private(i,overdense_val) num_threads(user_params_ps->N_THREADS)
    {
#pragma omp for
        for (i=0; i<NSFR_low; i++){
            overdense_val = log10(1. + overdense_small_low) + (double)i/((double)NSFR_low-1.)*(log10(1.+overdense_small_high)-log10(1.+overdense_small_low));

            log10_overdense_spline_SFR[i] = overdense_val;
            log10_Nion_spline[i] = GaussLegendreQuad_Nion(0,NGL_SFR,growthf,Mmax,sigma2,Deltac,pow(10.,overdense_val)-1.,MassTurnover,Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc, FAST_FCOLL_TABLES);
            // Floor tiny magnitudes before taking log10, then clamp and
            // convert to natural log (stored as ln for spline evaluation).
            if(fabs(log10_Nion_spline[i]) < 1e-38) {
                log10_Nion_spline[i] = 1e-38;
            }
            log10_Nion_spline[i] = log10(log10_Nion_spline[i]);
            if(log10_Nion_spline[i] < -40.){
                log10_Nion_spline[i] = -40.;
            }
            log10_Nion_spline[i] *= ln_10;
        }
    }

    for (i=0; i<NSFR_low; i++){
        if(!isfinite(log10_Nion_spline[i])) {
            LOG_ERROR("Detected either an infinite or NaN value in log10_Nion_spline");
//            Throw(ParameterError);
            Throw(TableGenerationError);
        }
    }

#pragma omp parallel shared(Overdense_spline_SFR,Nion_spline,overdense_large_low,overdense_large_high,growthf,Mmin,Mmax,sigma2,MassTurnover,Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc) private(i) num_threads(user_params_ps->N_THREADS)
    {
#pragma omp for
        for(i=0;i<NSFR_high;i++) {
            Overdense_spline_SFR[i] = overdense_large_low + (float)i/((float)NSFR_high-1.)*(overdense_large_high - overdense_large_low);
            Nion_spline[i] = Nion_ConditionalM(growthf,Mmin,Mmax,sigma2,Deltac,Overdense_spline_SFR[i],MassTurnover,Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc, FAST_FCOLL_TABLES);
            if(Nion_spline[i]<0.)
{
                // Clamp negative integration results to an effective zero.
                Nion_spline[i]=pow(10.,-40.0);
            }
        }
    }

    for(i=0;i<NSFR_high;i++) {
        if(!isfinite(Nion_spline[i])) {
            LOG_ERROR("Detected either an infinite or NaN value in log10_Nion_spline");
//            Throw(ParameterError);
            Throw(TableGenerationError);
        }
    }
}

// Build the 2-D conditional-Nion tables (overdensity x turnover mass) for both
// atomically-cooled galaxies (log10_Nion_spline/Nion_spline) and minihaloes
// (log10_Nion_spline_MINI/Nion_spline_MINI) at redshift z, over NMTURN
// turnover-mass bins. Results are validated for NaN/inf.
void initialise_Nion_General_spline_MINI(float z, float Mcrit_atom, float min_density, float max_density, float Mmax, float Mmin, float log10Mturn_min, float log10Mturn_max, float log10Mturn_min_MINI, float log10Mturn_max_MINI, float Alpha_star, float Alpha_star_mini, float Alpha_esc, float Fstar10, float Fesc10, float Mlim_Fstar, float Mlim_Fesc, float Fstar7_MINI, float Fesc7_MINI, float Mlim_Fstar_MINI, float Mlim_Fesc_MINI, bool FAST_FCOLL_TABLES){

    double growthf, sigma2;
    double overdense_large_high = Deltac, overdense_large_low = global_params.CRIT_DENS_TRANSITION*0.999;
    double overdense_small_high, overdense_small_low;
    int i,j;
    float ln_10;

    // Low-density grid tops out at the critical-density transition.
    if(max_density > global_params.CRIT_DENS_TRANSITION*1.001) {
        overdense_small_high = global_params.CRIT_DENS_TRANSITION*1.001;
    }
    else {
        overdense_small_high = max_density;
    }
    overdense_small_low = min_density;

    ln_10 = log(10);

    float MassBinLow;
    int MassBin;

    growthf = dicke(z);

    Mmin = log(Mmin);
    Mmax = log(Mmax);

    // Interpolate sigma(Mmax) from the precomputed table.
    MassBin = (int)floor( ( Mmax - MinMass )*inv_mass_bin_width );
    MassBinLow = MinMass + mass_bin_width*(float)MassBin;

    sigma2 = Sigma_InterpTable[MassBin] + ( Mmax - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;

    // Axis grids: log10(1+delta) for the low range, linear delta for the high
    // range, and log-spaced turnover masses for both populations.
    for (i=0; i<NSFR_low; i++){
        log10_overdense_spline_SFR[i] = log10(1. + overdense_small_low) + (double)i/((double)NSFR_low-1.)*(log10(1.+overdense_small_high)-log10(1.+overdense_small_low));
    }
    for (i=0;i<NSFR_high;i++) {
        Overdense_spline_SFR[i] = overdense_large_low + (float)i/((float)NSFR_high-1.)*(overdense_large_high - overdense_large_low);
    }
    for (i=0;i<NMTURN;i++){
        Mturns[i] = pow(10., log10Mturn_min + (float)i/((float)NMTURN-1.)*(log10Mturn_max-log10Mturn_min));
        Mturns_MINI[i] = pow(10., log10Mturn_min_MINI + (float)i/((float)NMTURN-1.)*(log10Mturn_max_MINI-log10Mturn_min_MINI));
    }

#pragma omp parallel shared(log10_Nion_spline,growthf,Mmax,sigma2,log10_overdense_spline_SFR,Mturns,Mturns_MINI,\
            Alpha_star,Alpha_star_mini,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc,ln_10,log10_Nion_spline_MINI,Mcrit_atom,\
            Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI) \
            private(i,j) num_threads(user_params_ps->N_THREADS)
    {
#pragma omp for
        for (i=0; i<NSFR_low; i++){
            for (j=0; j<NMTURN; j++){
                // Tables are stored flattened as [i + j*NSFR_low]; values are
                // clamped at -40 in log10 and converted to natural log.
                log10_Nion_spline[i+j*NSFR_low] = log10(GaussLegendreQuad_Nion(0,NGL_SFR,growthf,Mmax,sigma2,Deltac,
                                                        pow(10.,log10_overdense_spline_SFR[i])-1.,Mturns[j],Alpha_star,
                                                        Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc, FAST_FCOLL_TABLES));
                if(log10_Nion_spline[i+j*NSFR_low] < -40.){
                    log10_Nion_spline[i+j*NSFR_low] = -40.;
                }
                log10_Nion_spline[i+j*NSFR_low] *= ln_10;

                log10_Nion_spline_MINI[i+j*NSFR_low] = log10(GaussLegendreQuad_Nion_MINI(0,NGL_SFR,growthf,Mmax,sigma2,Deltac,
                                                        pow(10.,log10_overdense_spline_SFR[i])-1.,Mturns_MINI[j],Mcrit_atom,
                                                        Alpha_star_mini,Alpha_esc,Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI, FAST_FCOLL_TABLES));
                if(log10_Nion_spline_MINI[i+j*NSFR_low] < -40.){
                    log10_Nion_spline_MINI[i+j*NSFR_low] = -40.;
                }
                log10_Nion_spline_MINI[i+j*NSFR_low] *= ln_10;
            }
        }
    }

    for (i=0; i<NSFR_low; i++){
        for (j=0; j<NMTURN; j++){
            if(isfinite(log10_Nion_spline[i+j*NSFR_low])==0) {
                LOG_ERROR("Detected either an infinite or NaN value in log10_Nion_spline");
//                Throw(ParameterError);
                Throw(TableGenerationError);
            }
            if(isfinite(log10_Nion_spline_MINI[i+j*NSFR_low])==0) {
                LOG_ERROR("Detected either an infinite or NaN value in log10_Nion_spline_MINI");
//                Throw(ParameterError);
                Throw(TableGenerationError);
            }
        }
    }

#pragma omp parallel shared(Nion_spline,growthf,Mmin,Mmax,sigma2,Overdense_spline_SFR,Mturns,Alpha_star,Alpha_star_mini,\
            Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc,Nion_spline_MINI,Mturns_MINI,Mcrit_atom,\
            Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI) \
            private(i,j) num_threads(user_params_ps->N_THREADS)
    {
#pragma omp for
        for(i=0;i<NSFR_high;i++) {
            for (j=0; j<NMTURN; j++){
                Nion_spline[i+j*NSFR_high] = Nion_ConditionalM(
                    growthf,Mmin,Mmax,sigma2,Deltac,Overdense_spline_SFR[i],
                    Mturns[j],Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc, FAST_FCOLL_TABLES
                );

                if(Nion_spline[i+j*NSFR_high]<0.)
                {
                    // Clamp negative integration results to an effective zero.
                    Nion_spline[i+j*NSFR_high]=pow(10.,-40.0);
                }

                Nion_spline_MINI[i+j*NSFR_high] = Nion_ConditionalM_MINI(
                    growthf,Mmin,Mmax,sigma2,Deltac,Overdense_spline_SFR[i],
                    Mturns_MINI[j],Mcrit_atom,Alpha_star_mini,Alpha_esc,Fstar7_MINI,Fesc7_MINI,
                    Mlim_Fstar_MINI,Mlim_Fesc_MINI, FAST_FCOLL_TABLES
                );

                if(Nion_spline_MINI[i+j*NSFR_high]<0.)
                {
                    Nion_spline_MINI[i+j*NSFR_high]=pow(10.,-40.0);
                }
            }
        }
    }

    for(i=0;i<NSFR_high;i++) {
        for (j=0; j<NMTURN; j++){
            if(isfinite(Nion_spline[i+j*NSFR_high])==0) {
                LOG_ERROR("Detected either an infinite or NaN value in Nion_spline");
//                Throw(ParameterError);
                Throw(TableGenerationError);
            }

            if(isfinite(Nion_spline_MINI[i+j*NSFR_high])==0) {
                LOG_ERROR("Detected either an infinite or NaN value in Nion_spline_MINI");
//                Throw(ParameterError);
                Throw(TableGenerationError);
            }
        }
    }
}

// Same as initialise_Nion_General_spline_MINI but fills the prev_* tables for
// the previous filtering step / snapshot.
void initialise_Nion_General_spline_MINI_prev(float z, float Mcrit_atom, float min_density, float max_density, float Mmax, float Mmin, float log10Mturn_min, float log10Mturn_max, float log10Mturn_min_MINI, float log10Mturn_max_MINI, float Alpha_star, float Alpha_star_mini, float Alpha_esc, float Fstar10, float Fesc10, float Mlim_Fstar, float Mlim_Fesc, float Fstar7_MINI, float Fesc7_MINI, float Mlim_Fstar_MINI, float Mlim_Fesc_MINI, bool FAST_FCOLL_TABLES){

    double growthf, sigma2;
    double overdense_large_high = Deltac, overdense_large_low = global_params.CRIT_DENS_TRANSITION*0.999;
    double overdense_small_high, overdense_small_low;
    int i,j;
    float ln_10;

    if(max_density > global_params.CRIT_DENS_TRANSITION*1.001) {
        overdense_small_high = global_params.CRIT_DENS_TRANSITION*1.001;
    }
    else {
        overdense_small_high = max_density;
    }
    overdense_small_low = min_density;

    ln_10 = log(10);

    float MassBinLow;
    int MassBin;

    growthf = dicke(z);

    Mmin = log(Mmin);
    Mmax = log(Mmax);

    MassBin = (int)floor( ( Mmax - MinMass )*inv_mass_bin_width );
    MassBinLow = MinMass + mass_bin_width*(float)MassBin;

    sigma2 = Sigma_InterpTable[MassBin] + ( Mmax - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;

    for (i=0; i<NSFR_low; i++){
        prev_log10_overdense_spline_SFR[i] = log10(1.
+ overdense_small_low) + (double)i/((double)NSFR_low-1.)*(log10(1.+overdense_small_high)-log10(1.+overdense_small_low));
    }
    for (i=0;i<NSFR_high;i++) {
        prev_Overdense_spline_SFR[i] = overdense_large_low + (float)i/((float)NSFR_high-1.)*(overdense_large_high - overdense_large_low);
    }
    for (i=0;i<NMTURN;i++){
        Mturns[i] = pow(10., log10Mturn_min + (float)i/((float)NMTURN-1.)*(log10Mturn_max-log10Mturn_min));
        Mturns_MINI[i] = pow(10., log10Mturn_min_MINI + (float)i/((float)NMTURN-1.)*(log10Mturn_max_MINI-log10Mturn_min_MINI));
    }

#pragma omp parallel shared(prev_log10_Nion_spline,growthf,Mmax,sigma2,prev_log10_overdense_spline_SFR,Mturns,Alpha_star,Alpha_star_mini,\
            Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc,prev_log10_Nion_spline_MINI,Mturns_MINI,Mcrit_atom,\
            Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI) \
            private(i,j) num_threads(user_params_ps->N_THREADS)
    {
#pragma omp for
        for (i=0; i<NSFR_low; i++){
            for (j=0; j<NMTURN; j++){
                // Flattened [i + j*NSFR_low] tables; clamp at -40 in log10 and
                // convert to natural log, mirroring the non-prev variant.
                prev_log10_Nion_spline[i+j*NSFR_low] = log10(GaussLegendreQuad_Nion(0,NGL_SFR,growthf,Mmax,sigma2,Deltac,
                                                    pow(10.,prev_log10_overdense_spline_SFR[i])-1.,Mturns[j],
                                                    Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc, FAST_FCOLL_TABLES));
                if(prev_log10_Nion_spline[i+j*NSFR_low] < -40.){
                    prev_log10_Nion_spline[i+j*NSFR_low] = -40.;
                }
                prev_log10_Nion_spline[i+j*NSFR_low] *= ln_10;

                prev_log10_Nion_spline_MINI[i+j*NSFR_low] = log10(GaussLegendreQuad_Nion_MINI(0,NGL_SFR,growthf,Mmax,sigma2,Deltac,
                                                    pow(10.,prev_log10_overdense_spline_SFR[i])-1.,Mturns_MINI[j],Mcrit_atom,
                                                    Alpha_star_mini,Alpha_esc,Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI, FAST_FCOLL_TABLES));
                if(prev_log10_Nion_spline_MINI[i+j*NSFR_low] < -40.){
                    prev_log10_Nion_spline_MINI[i+j*NSFR_low] = -40.;
                }
                prev_log10_Nion_spline_MINI[i+j*NSFR_low] *= ln_10;
            }
        }
    }

    for (i=0; i<NSFR_low; i++){
        for (j=0; j<NMTURN; j++){
            if(isfinite(prev_log10_Nion_spline[i+j*NSFR_low])==0) {
                LOG_ERROR("Detected either an infinite or NaN value in prev_log10_Nion_spline");
//                Throw(ParameterError);
                Throw(TableGenerationError);
            }
            if(isfinite(prev_log10_Nion_spline_MINI[i+j*NSFR_low])==0) {
                LOG_ERROR("Detected either an infinite or NaN value in prev_log10_Nion_spline_MINI");
//                Throw(ParameterError);
                Throw(TableGenerationError);
            }
        }
    }

#pragma omp parallel shared(prev_Nion_spline,growthf,Mmin,Mmax,sigma2,prev_Overdense_spline_SFR,Mturns,\
            Alpha_star,Alpha_star_mini,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc,prev_Nion_spline_MINI,Mturns_MINI,\
            Mcrit_atom,Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI) \
            private(i,j) num_threads(user_params_ps->N_THREADS)
    {
#pragma omp for
        for(i=0;i<NSFR_high;i++) {
            for (j=0; j<NMTURN; j++){
                prev_Nion_spline[i+j*NSFR_high] = Nion_ConditionalM(growthf,Mmin,Mmax,sigma2,Deltac,prev_Overdense_spline_SFR[i],
                                                    Mturns[j],Alpha_star,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc, FAST_FCOLL_TABLES);

                if(prev_Nion_spline[i+j*NSFR_high]<0.)
                {
                    // Clamp negative integration results to an effective zero.
                    prev_Nion_spline[i+j*NSFR_high]=pow(10.,-40.0);
                }

                prev_Nion_spline_MINI[i+j*NSFR_high] = Nion_ConditionalM_MINI(growthf,Mmin,Mmax,sigma2,Deltac,
                                                    prev_Overdense_spline_SFR[i],Mturns_MINI[j],Mcrit_atom,Alpha_star_mini,
                                                    Alpha_esc,Fstar7_MINI,Fesc7_MINI,Mlim_Fstar_MINI,Mlim_Fesc_MINI, FAST_FCOLL_TABLES);

                if(prev_Nion_spline_MINI[i+j*NSFR_high]<0.)
                {
                    prev_Nion_spline_MINI[i+j*NSFR_high]=pow(10.,-40.0);
                }
            }
        }
    }

    for(i=0;i<NSFR_high;i++) {
        for (j=0; j<NMTURN; j++){
            if(isfinite(prev_Nion_spline[i+j*NSFR_high])==0) {
                LOG_ERROR("Detected either an infinite or NaN value in prev_Nion_spline");
//                Throw(ParameterError);
                Throw(TableGenerationError);
            }
            if(isfinite(prev_Nion_spline_MINI[i+j*NSFR_high])==0) {
                LOG_ERROR("Detected either an infinite or NaN value in prev_Nion_spline_MINI");
//                Throw(ParameterError);
                Throw(TableGenerationError);
            }
        }
    }
}

// Build the redshift table Nion_z_val (Nbin points in [zmin, zmax]) of the
// globally-averaged Nion for atomically-cooled galaxies. Allocates z_val /
// Nion_z_val on first use.
void initialise_Nion_Ts_spline(
    int Nbin, float zmin, float zmax, float MassTurn, float Alpha_star, float Alpha_esc, float Fstar10, float Fesc10
){
    int i;
    float Mmin = MassTurn/50., Mmax = global_params.M_MAX_INTEGRAL;
    float Mlim_Fstar, Mlim_Fesc;

    // Lazy allocation: tables persist across calls.
    if (z_val == NULL){
        z_val = calloc(Nbin,sizeof(double));
        Nion_z_val = calloc(Nbin,sizeof(double));
    }

    Mlim_Fstar = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar10);
    Mlim_Fesc = Mass_limit_bisection(Mmin, Mmax, Alpha_esc, Fesc10);

#pragma omp parallel shared(z_val,Nion_z_val,zmin,zmax, MassTurn, Alpha_star, Alpha_esc, Fstar10, Fesc10, Mlim_Fstar, Mlim_Fesc) private(i) num_threads(user_params_ps->N_THREADS)
    {
#pragma omp for
        for (i=0; i<Nbin; i++){
            z_val[i] = zmin + (double)i/((double)Nbin-1.)*(zmax - zmin);
            Nion_z_val[i] = Nion_General(z_val[i], Mmin, MassTurn, Alpha_star, Alpha_esc, Fstar10, Fesc10, Mlim_Fstar, Mlim_Fesc);
        }
    }

    for (i=0; i<Nbin; i++){
        if(isfinite(Nion_z_val[i])==0) {
            LOG_ERROR("Detected either an infinite or NaN value in Nion_z_val");
//            Throw(ParameterError);
            Throw(TableGenerationError);
        }
    }
}

// Build the redshift tables Nion_z_val (atomic galaxies, turnover at the
// atomic-cooling threshold of each z) and Nion_z_val_MINI (minihaloes, over
// NMTURN log-spaced turnover masses). Allocates the tables on first use.
void initialise_Nion_Ts_spline_MINI(
    int Nbin, float zmin, float zmax, float Alpha_star, float Alpha_star_mini, float Alpha_esc, float Fstar10, float Fesc10, float Fstar7_MINI, float Fesc7_MINI
){
    int i,j;
    float Mmin = global_params.M_MIN_INTEGRAL, Mmax = global_params.M_MAX_INTEGRAL;
    float Mlim_Fstar, Mlim_Fesc, Mlim_Fstar_MINI, Mlim_Fesc_MINI, Mcrit_atom_val;

    // Lazy allocation: tables persist across calls.
    if (z_val == NULL){
        z_val = calloc(Nbin,sizeof(double));
Nion_z_val = calloc(Nbin,sizeof(double)); Nion_z_val_MINI = calloc(Nbin*NMTURN,sizeof(double)); } Mlim_Fstar = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar10); Mlim_Fesc = Mass_limit_bisection(Mmin, Mmax, Alpha_esc, Fesc10); Mlim_Fstar_MINI = Mass_limit_bisection(Mmin, Mmax, Alpha_star_mini, Fstar7_MINI * pow(1e3, Alpha_star_mini)); Mlim_Fesc_MINI = Mass_limit_bisection(Mmin, Mmax, Alpha_esc, Fesc7_MINI * pow(1e3, Alpha_esc)); float MassTurnover[NMTURN]; for (i=0;i<NMTURN;i++){ MassTurnover[i] = pow(10., LOG10_MTURN_MIN + (float)i/((float)NMTURN-1.)*(LOG10_MTURN_MAX-LOG10_MTURN_MIN)); } #pragma omp parallel shared(z_val,Nion_z_val,Nbin,zmin,zmax,Mmin,Alpha_star,Alpha_star_mini,Alpha_esc,Fstar10,Fesc10,Mlim_Fstar,Mlim_Fesc,\ Nion_z_val_MINI,MassTurnover,Fstar7_MINI, Fesc7_MINI, Mlim_Fstar_MINI, Mlim_Fesc_MINI) \ private(i,j,Mcrit_atom_val) num_threads(user_params_ps->N_THREADS) { #pragma omp for for (i=0; i<Nbin; i++){ z_val[i] = zmin + (double)i/((double)Nbin-1.)*(zmax - zmin); Mcrit_atom_val = atomic_cooling_threshold(z_val[i]); Nion_z_val[i] = Nion_General(z_val[i], Mmin, Mcrit_atom_val, Alpha_star, Alpha_esc, Fstar10, Fesc10, Mlim_Fstar, Mlim_Fesc); for (j=0; j<NMTURN; j++){ Nion_z_val_MINI[i+j*Nbin] = Nion_General_MINI(z_val[i], Mmin, MassTurnover[j], Mcrit_atom_val, Alpha_star_mini, Alpha_esc, Fstar7_MINI, Fesc7_MINI, Mlim_Fstar_MINI, Mlim_Fesc_MINI); } } } for (i=0; i<Nbin; i++){ if(isfinite(Nion_z_val[i])==0) { i = Nbin; LOG_ERROR("Detected either an infinite or NaN value in Nion_z_val"); // Throw(ParameterError); Throw(TableGenerationError); } for (j=0; j<NMTURN; j++){ if(isfinite(Nion_z_val_MINI[i+j*Nbin])==0){ j = NMTURN; LOG_ERROR("Detected either an infinite or NaN value in Nion_z_val_MINI"); // Throw(ParameterError); Throw(TableGenerationError); } } } } void initialise_SFRD_spline(int Nbin, float zmin, float zmax, float MassTurn, float Alpha_star, float Fstar10){ int i; float Mmin = MassTurn/50., Mmax = global_params.M_MAX_INTEGRAL; float 
Mlim_Fstar; if (z_X_val == NULL){ z_X_val = calloc(Nbin,sizeof(double)); SFRD_val = calloc(Nbin,sizeof(double)); } Mlim_Fstar = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar10); #pragma omp parallel shared(z_X_val,SFRD_val,zmin,zmax, MassTurn, Alpha_star, Fstar10, Mlim_Fstar) private(i) num_threads(user_params_ps->N_THREADS) { #pragma omp for for (i=0; i<Nbin; i++){ z_X_val[i] = zmin + (double)i/((double)Nbin-1.)*(zmax - zmin); SFRD_val[i] = Nion_General(z_X_val[i], Mmin, MassTurn, Alpha_star, 0., Fstar10, 1.,Mlim_Fstar,0.); } } for (i=0; i<Nbin; i++){ if(isfinite(SFRD_val[i])==0) { LOG_ERROR("Detected either an infinite or NaN value in SFRD_val"); // Throw(ParameterError); Throw(TableGenerationError); } } } void initialise_SFRD_spline_MINI(int Nbin, float zmin, float zmax, float Alpha_star, float Alpha_star_mini, float Fstar10, float Fstar7_MINI){ int i,j; float Mmin = global_params.M_MIN_INTEGRAL, Mmax = global_params.M_MAX_INTEGRAL; float Mlim_Fstar, Mlim_Fstar_MINI, Mcrit_atom_val; if (z_X_val == NULL){ z_X_val = calloc(Nbin,sizeof(double)); SFRD_val = calloc(Nbin,sizeof(double)); SFRD_val_MINI = calloc(Nbin*NMTURN,sizeof(double)); } Mlim_Fstar = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar10); Mlim_Fstar_MINI = Mass_limit_bisection(Mmin, Mmax, Alpha_star_mini, Fstar7_MINI * pow(1e3, Alpha_star_mini)); float MassTurnover[NMTURN]; for (i=0;i<NMTURN;i++){ MassTurnover[i] = pow(10., LOG10_MTURN_MIN + (float)i/((float)NMTURN-1.)*(LOG10_MTURN_MAX-LOG10_MTURN_MIN)); } #pragma omp parallel shared(z_X_val,zmin,zmax,Nbin,SFRD_val,Mmin, Alpha_star,Alpha_star_mini,Fstar10,Mlim_Fstar,\ SFRD_val_MINI,MassTurnover,Fstar7_MINI,Mlim_Fstar_MINI) \ private(i,j,Mcrit_atom_val) num_threads(user_params_ps->N_THREADS) { #pragma omp for for (i=0; i<Nbin; i++){ z_X_val[i] = zmin + (double)i/((double)Nbin-1.)*(zmax - zmin); Mcrit_atom_val = atomic_cooling_threshold(z_X_val[i]); SFRD_val[i] = Nion_General(z_X_val[i], Mmin, Mcrit_atom_val, Alpha_star, 0., Fstar10, 
1.,Mlim_Fstar,0.);
            for (j=0; j<NMTURN; j++){
                SFRD_val_MINI[i+j*Nbin] = Nion_General_MINI(z_X_val[i], Mmin, MassTurnover[j], Mcrit_atom_val, Alpha_star_mini, 0., Fstar7_MINI, 1.,Mlim_Fstar_MINI,0.);
            }
        }
    }

    // Sanity check: abort table generation on any non-finite entry.
    // (Setting i = Nbin / j = NMTURN short-circuits the loops before throwing.)
    for (i=0; i<Nbin; i++){
        if(isfinite(SFRD_val[i])==0) {
            i = Nbin;
            LOG_ERROR("Detected either an infinite or NaN value in SFRD_val");
//            Throw(ParameterError);
            Throw(TableGenerationError);
        }
        for (j=0; j<NMTURN; j++){
            if(isfinite(SFRD_val_MINI[i+j*Nbin])==0) {
                j = NMTURN;
                LOG_ERROR("Detected either an infinite or NaN value in SFRD_val_MINI");
//                Throw(ParameterError);
                Throw(TableGenerationError);
            }
        }
    }
}

// Build the conditional SFRD tables over overdensity for each of the Nfilter
// smoothing scales R[]: a log-spaced "low" overdensity table (up to
// CRIT_DENS_TRANSITION) evaluated by Gauss-Legendre quadrature, and a linear
// "high" table (up to Deltac) evaluated via Nion_ConditionalM.
void initialise_SFRD_Conditional_table(
    int Nfilter, float min_density[], float max_density[], float growthf[], float R[],
    float MassTurnover, float Alpha_star, float Fstar10, bool FAST_FCOLL_TABLES
){

    double overdense_val;
    double overdense_large_high = Deltac, overdense_large_low = global_params.CRIT_DENS_TRANSITION;
    double overdense_small_high, overdense_small_low;

    float Mmin,Mmax,Mlim_Fstar,sigma2;
    int i,j,k,i_tot;

    float ln_10;
    ln_10 = log(10);

    Mmin = MassTurnover/50.;
    Mmax = RtoM(R[Nfilter-1]);  // largest filter scale sets the global mass cap
    Mlim_Fstar = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar10);

    Mmin = log(Mmin);

    // Linear grid of high-overdensity values shared by all filter scales.
    for (i=0; i<NSFR_high;i++) {
        overdense_high_table[i] = overdense_large_low + (float)i/((float)NSFR_high-1.)*(overdense_large_high - overdense_large_low);
    }

    float MassBinLow;
    int MassBin;

    for (j=0; j < Nfilter; j++) {

        Mmax = RtoM(R[j]);

        initialiseGL_Nion_Xray(NGL_SFR, MassTurnover/50., Mmax);

        Mmax = log(Mmax);
        // Linear interpolation of sigma(M) from the precomputed table at ln(Mmax).
        MassBin = (int)floor( ( Mmax - MinMass )*inv_mass_bin_width );
        MassBinLow = MinMass + mass_bin_width*(float)MassBin;
        sigma2 = Sigma_InterpTable[MassBin] + ( Mmax - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;

        // Clamp the low edge so log10(1 + delta) below stays finite.
        if(min_density[j]*growthf[j] < -1.) {
            overdense_small_low = -1.
+ global_params.MIN_DENSITY_LOW_LIMIT;
        }
        else {
            overdense_small_low = min_density[j]*growthf[j];
        }
        overdense_small_high = max_density[j]*growthf[j];
        // Cap the low-density table at the transition to the "high" table.
        if(overdense_small_high > global_params.CRIT_DENS_TRANSITION) {
            overdense_small_high = global_params.CRIT_DENS_TRANSITION;
        }

        // Log-spaced grid in (1 + delta) for the low-overdensity regime.
        for (i=0; i<NSFR_low; i++) {
            overdense_val = log10(1. + overdense_small_low) + (float)i/((float)NSFR_low-1.)*(log10(1.+overdense_small_high)-log10(1.+overdense_small_low));
            overdense_low_table[i] = pow(10.,overdense_val);
        }

#pragma omp parallel shared(log10_SFRD_z_low_table,growthf,Mmax,sigma2,overdense_low_table,MassTurnover,Alpha_star,Fstar10,Mlim_Fstar) private(i) num_threads(user_params_ps->N_THREADS)
        {
#pragma omp for
            for (i=0; i<NSFR_low; i++){
                log10_SFRD_z_low_table[j][i] = GaussLegendreQuad_Nion(1,NGL_SFR,growthf[j],Mmax,sigma2,Deltac,overdense_low_table[i]-1.,MassTurnover,Alpha_star,0.,Fstar10,1.,Mlim_Fstar,0., FAST_FCOLL_TABLES);
                // Floor tiny values before taking the log to avoid -inf.
                if(fabs(log10_SFRD_z_low_table[j][i]) < 1e-38) {
                    log10_SFRD_z_low_table[j][i] = 1e-38;
                }
                // Store ln(SFRD * 1e10): +10 in log10 then scaled by ln(10).
                log10_SFRD_z_low_table[j][i] = log10(log10_SFRD_z_low_table[j][i]);
                log10_SFRD_z_low_table[j][i] += 10.0;
                log10_SFRD_z_low_table[j][i] *= ln_10;
            }
        }

        for (i=0; i<NSFR_low; i++){
            if(isfinite(log10_SFRD_z_low_table[j][i])==0) {
                LOG_ERROR("Detected either an infinite or NaN value in log10_SFRD_z_low_table");
//                Throw(ParameterError);
                Throw(TableGenerationError);
            }
        }

#pragma omp parallel shared(SFRD_z_high_table,growthf,Mmin,Mmax,sigma2,overdense_high_table,MassTurnover,Alpha_star,Fstar10,Mlim_Fstar) private(i) num_threads(user_params_ps->N_THREADS)
        {
#pragma omp for
            for(i=0;i<NSFR_high;i++) {
                SFRD_z_high_table[j][i] = Nion_ConditionalM(growthf[j],Mmin,Mmax,sigma2,Deltac,overdense_high_table[i],MassTurnover,Alpha_star,0.,Fstar10,1.,Mlim_Fstar,0., FAST_FCOLL_TABLES);
                SFRD_z_high_table[j][i] *= pow(10., 10.0);  // same 1e10 scaling as the low table
            }
        }

        for(i=0;i<NSFR_high;i++) {
            if(isfinite(SFRD_z_high_table[j][i])==0) {
                LOG_ERROR("Detected either an infinite or NaN value in SFRD_z_high_table");
//                Throw(ParameterError);
                Throw(TableGenerationError);
            }
        }
    }
}

// As initialise_SFRD_Conditional_table, but with the atomic-cooling threshold
// Mcrit_atom[] per filter scale and an extra NMTURN-wide minihalo (MINI) axis,
// stored flattened as [i + k*NSFR_low] / [i + k*NSFR_high].
void initialise_SFRD_Conditional_table_MINI(
    int Nfilter, float min_density[], float max_density[], float growthf[], float R[],
    float Mcrit_atom[], float Alpha_star, float Alpha_star_mini, float Fstar10, float Fstar7_MINI,
    bool FAST_FCOLL_TABLES
){

    double overdense_val;
    double overdense_large_high = Deltac, overdense_large_low = global_params.CRIT_DENS_TRANSITION;
    double overdense_small_high, overdense_small_low;

    float Mmin,Mmax,Mlim_Fstar,sigma2,Mlim_Fstar_MINI;
    int i,j,k,i_tot;

    float ln_10;
    ln_10 = log(10);

    Mmin = global_params.M_MIN_INTEGRAL;
    Mmax = RtoM(R[Nfilter-1]);
    Mlim_Fstar = Mass_limit_bisection(Mmin, Mmax, Alpha_star, Fstar10);
    Mlim_Fstar_MINI = Mass_limit_bisection(Mmin, Mmax, Alpha_star_mini, Fstar7_MINI * pow(1e3, Alpha_star_mini));

    // Log-spaced grid of turnover masses for the MINI axis.
    float MassTurnover[NMTURN];
    for (i=0;i<NMTURN;i++){
        MassTurnover[i] = pow(10., LOG10_MTURN_MIN + (float)i/((float)NMTURN-1.)*(LOG10_MTURN_MAX-LOG10_MTURN_MIN));
    }

    Mmin = log(Mmin);

    for (i=0; i<NSFR_high;i++) {
        overdense_high_table[i] = overdense_large_low + (float)i/((float)NSFR_high-1.)*(overdense_large_high - overdense_large_low);
    }

    float MassBinLow;
    int MassBin;

    for (j=0; j < Nfilter; j++) {

        Mmax = RtoM(R[j]);

        initialiseGL_Nion_Xray(NGL_SFR, global_params.M_MIN_INTEGRAL, Mmax);

        Mmax = log(Mmax);
        // Linear interpolation of sigma(M) from the precomputed table at ln(Mmax).
        MassBin = (int)floor( ( Mmax - MinMass )*inv_mass_bin_width );
        MassBinLow = MinMass + mass_bin_width*(float)MassBin;
        sigma2 = Sigma_InterpTable[MassBin] + ( Mmax - MassBinLow )*( Sigma_InterpTable[MassBin+1] - Sigma_InterpTable[MassBin] )*inv_mass_bin_width;

        // Clamp the low edge so log10(1 + delta) below stays finite.
        if(min_density[j]*growthf[j] < -1.) {
            overdense_small_low = -1. + global_params.MIN_DENSITY_LOW_LIMIT;
        }
        else {
            overdense_small_low = min_density[j]*growthf[j];
        }
        overdense_small_high = max_density[j]*growthf[j];
        if(overdense_small_high > global_params.CRIT_DENS_TRANSITION) {
            overdense_small_high = global_params.CRIT_DENS_TRANSITION;
        }

        // Log-spaced grid in (1 + delta) for the low-overdensity regime.
        for (i=0; i<NSFR_low; i++) {
            overdense_val = log10(1.
+ overdense_small_low) + (float)i/((float)NSFR_low-1.)*(log10(1.+overdense_small_high)-log10(1.+overdense_small_low));
            overdense_low_table[i] = pow(10.,overdense_val);
        }

#pragma omp parallel shared(log10_SFRD_z_low_table,growthf,Mmax,sigma2,overdense_low_table,Mcrit_atom,Alpha_star,Alpha_star_mini,Fstar10,Mlim_Fstar,\
                            log10_SFRD_z_low_table_MINI,MassTurnover,Fstar7_MINI,Mlim_Fstar_MINI,ln_10) \
                    private(i,k) num_threads(user_params_ps->N_THREADS)
        {
#pragma omp for
            for (i=0; i<NSFR_low; i++){
                log10_SFRD_z_low_table[j][i] = log10(GaussLegendreQuad_Nion(1,NGL_SFR,growthf[j],Mmax,sigma2,Deltac,overdense_low_table[i]-1.,Mcrit_atom[j],Alpha_star,0.,Fstar10,1.,Mlim_Fstar,0., FAST_FCOLL_TABLES));
                // Floor the log at -50 to avoid propagating -inf from an underflowed integral.
                if(log10_SFRD_z_low_table[j][i] < -50.){
                    log10_SFRD_z_low_table[j][i] = -50.;
                }
                // Store ln(SFRD * 1e10): +10 in log10, then scaled by ln(10).
                log10_SFRD_z_low_table[j][i] += 10.0;
                log10_SFRD_z_low_table[j][i] *= ln_10;
                for (k=0; k<NMTURN; k++){
                    log10_SFRD_z_low_table_MINI[j][i+k*NSFR_low] = log10(GaussLegendreQuad_Nion_MINI(1,NGL_SFR,growthf[j],Mmax,sigma2,Deltac,overdense_low_table[i]-1.,MassTurnover[k], Mcrit_atom[j],Alpha_star_mini,0.,Fstar7_MINI,1.,Mlim_Fstar_MINI, 0., FAST_FCOLL_TABLES));
                    if(log10_SFRD_z_low_table_MINI[j][i+k*NSFR_low] < -50.){
                        log10_SFRD_z_low_table_MINI[j][i+k*NSFR_low] = -50.;
                    }
                    log10_SFRD_z_low_table_MINI[j][i+k*NSFR_low] += 10.0;
                    log10_SFRD_z_low_table_MINI[j][i+k*NSFR_low] *= ln_10;
                }
            }
        }

        // Sanity check: abort table generation on any non-finite entry.
        for (i=0; i<NSFR_low; i++){
            if(isfinite(log10_SFRD_z_low_table[j][i])==0) {
                LOG_ERROR("Detected either an infinite or NaN value in log10_SFRD_z_low_table");
//                Throw(ParameterError);
                Throw(TableGenerationError);
            }
            for (k=0; k<NMTURN; k++){
                if(isfinite(log10_SFRD_z_low_table_MINI[j][i+k*NSFR_low])==0) {
                    LOG_ERROR("Detected either an infinite or NaN value in log10_SFRD_z_low_table_MINI");
//                    Throw(ParameterError);
                    Throw(TableGenerationError);
                }
            }
        }

#pragma omp parallel shared(SFRD_z_high_table,growthf,Mmin,Mmax,sigma2,overdense_high_table,Mcrit_atom,Alpha_star,Alpha_star_mini,Fstar10,\
                            Mlim_Fstar,SFRD_z_high_table_MINI,MassTurnover,Fstar7_MINI,Mlim_Fstar_MINI) \
                    private(i,k) num_threads(user_params_ps->N_THREADS)
        {
#pragma omp for
            for(i=0;i<NSFR_high;i++) {
                SFRD_z_high_table[j][i] = Nion_ConditionalM(growthf[j],Mmin,Mmax,sigma2,Deltac,overdense_high_table[i],\
                                                            Mcrit_atom[j],Alpha_star,0.,Fstar10,1.,Mlim_Fstar,0., FAST_FCOLL_TABLES);
                // Floor underflowed values before the 1e10 rescaling.
                if (SFRD_z_high_table[j][i] < 1e-50){
                    SFRD_z_high_table[j][i] = 1e-50;
                }
                SFRD_z_high_table[j][i] *= pow(10., 10.0);
                for (k=0; k<NMTURN; k++){
                    SFRD_z_high_table_MINI[j][i+k*NSFR_high] = Nion_ConditionalM_MINI(growthf[j],Mmin,Mmax,sigma2,Deltac,\
                                                overdense_high_table[i],MassTurnover[k],Mcrit_atom[j],\
                                                Alpha_star_mini,0.,Fstar7_MINI,1.,Mlim_Fstar_MINI, 0., FAST_FCOLL_TABLES);
                    if (SFRD_z_high_table_MINI[j][i+k*NSFR_high] < 1e-50){
                        SFRD_z_high_table_MINI[j][i+k*NSFR_high] = 1e-50;
                    }
                }
            }
        }

        // Sanity check: abort table generation on any non-finite entry.
        for(i=0;i<NSFR_high;i++) {
            if(isfinite(SFRD_z_high_table[j][i])==0) {
                LOG_ERROR("Detected either an infinite or NaN value in SFRD_z_high_table");
//                Throw(ParameterError);
                Throw(TableGenerationError);
            }
            for (k=0; k<NMTURN; k++){
                if(isfinite(SFRD_z_high_table_MINI[j][i+k*NSFR_high])==0) {
                    LOG_ERROR("Detected either an infinite or NaN value in SFRD_z_high_table_MINI");
//                    Throw(ParameterError);
                    Throw(TableGenerationError);
                }
            }
        }
    }
}

// The volume filling factor at a given redshift, Q(z), or find redshift at a given Q, z(Q).
//
// The evolution of Q can be written as
// dQ/dt = n_{ion}/dt - Q/t_{rec},
// where n_{ion} is the number of ionizing photons per baryon. The averaged recombination time is given by
// t_{rec} ~ 0.93 Gyr * (C_{HII}/3)^-1 * (T_0/2e4 K)^0.7 * ((1+z)/7)^-3.
// We assume the clumping factor of C_{HII}=3 and the IGM temperature of T_0 = 2e4 K, following
// Section 2.1 of Kuhlen & Faucher-Giguere (2012) MNRAS, 423, 862 and references therein.
// 1) initialise interpolation table
// -> initialise_Q_value_spline(NoRec, M_TURN, ALPHA_STAR, ALPHA_ESC, F_STAR10, F_ESC10)
// NoRec = 0: Compute dQ/dt with the recombination time.
// NoRec = 1: Ignore recombination. // 2) find Q value at a given z -> Q_at_z(z, &(Q)) // or find z at a given Q -> z_at_Q(Q, &(z)). // 3) free memory allocation -> free_Q_value() // Set up interpolation table for the volume filling factor, Q, at a given redshift z and redshift at a given Q. int InitialisePhotonCons(struct UserParams *user_params, struct CosmoParams *cosmo_params, struct AstroParams *astro_params, struct FlagOptions *flag_options) { /* This is an API-level function for initialising the photon conservation. */ int status; Try{ // this try wraps the whole function. Broadcast_struct_global_PS(user_params,cosmo_params); Broadcast_struct_global_UF(user_params,cosmo_params); init_ps(); // To solve differentail equation, uses Euler's method. // NOTE: // (1) With the fiducial parameter set, // when the Q value is < 0.9, the difference is less than 5% compared with accurate calculation. // When Q ~ 0.98, the difference is ~25%. To increase accuracy one can reduce the step size 'da', but it will increase computing time. // (2) With the fiducial parameter set, // the difference for the redshift where the reionization end (Q = 1) is ~0.2 % compared with accurate calculation. float ION_EFF_FACTOR,M_MIN,M_MIN_z0,M_MIN_z1,Mlim_Fstar, Mlim_Fesc; double a_start = 0.03, a_end = 1./(1. + global_params.PhotonConsEndCalibz); // Scale factors of 0.03 and 0.17 correspond to redshifts of ~32 and ~5.0, respectively. double C_HII = 3., T_0 = 2e4; double reduce_ratio = 1.003; double Q0,Q1,Nion0,Nion1,Trec,da,a,z0,z1,zi,dadt,ans,delta_a,zi_prev,Q1_prev; double *z_arr,*Q_arr; int Nmax = 2000; // This is the number of step, enough with 'da = 2e-3'. If 'da' is reduced, this number should be checked. 
int cnt, nbin, i, istart; int fail_condition, not_mono_increasing, num_fails; int gsl_status; z_arr = calloc(Nmax,sizeof(double)); Q_arr = calloc(Nmax,sizeof(double)); //set the minimum source mass if (flag_options->USE_MASS_DEPENDENT_ZETA) { ION_EFF_FACTOR = global_params.Pop2_ion * astro_params->F_STAR10 * astro_params->F_ESC10; M_MIN = astro_params->M_TURN/50.; Mlim_Fstar = Mass_limit_bisection(M_MIN, global_params.M_MAX_INTEGRAL, astro_params->ALPHA_STAR, astro_params->F_STAR10); Mlim_Fesc = Mass_limit_bisection(M_MIN, global_params.M_MAX_INTEGRAL, astro_params->ALPHA_ESC, astro_params->F_ESC10); if(user_params->FAST_FCOLL_TABLES){ initialiseSigmaMInterpTable(fmin(MMIN_FAST,M_MIN),1e20); } else{ initialiseSigmaMInterpTable(M_MIN,1e20); } } else { ION_EFF_FACTOR = astro_params->HII_EFF_FACTOR; } fail_condition = 1; num_fails = 0; // We are going to come up with the analytic curve for the photon non conservation correction // This can be somewhat numerically unstable and as such we increase the sampling until it works // If it fails to produce a monotonically increasing curve (for Q as a function of z) after 10 attempts we crash out while(fail_condition!=0) { a = a_start; if(num_fails < 3) { da = 3e-3 - ((double)num_fails)*(1e-3); } else { da = 1e-3 - ((double)num_fails - 2.)*(1e-4); } delta_a = 1e-7; zi_prev = Q1_prev = 0.; not_mono_increasing = 0; if(num_fails>0) { for(i=0;i<Nmax;i++) { z_arr[i] = 0.; Q_arr[i] = 0.; } } cnt = 0; Q0 = 0.; while (a < a_end) { zi = 1./a - 1.; z0 = 1./(a+delta_a) - 1.; z1 = 1./(a-delta_a) - 1.; // Ionizing emissivity (num of photons per baryon) if (flag_options->USE_MASS_DEPENDENT_ZETA) { Nion0 = ION_EFF_FACTOR*Nion_General(z0, astro_params->M_TURN/50., astro_params->M_TURN, astro_params->ALPHA_STAR, astro_params->ALPHA_ESC, astro_params->F_STAR10, astro_params->F_ESC10, Mlim_Fstar, Mlim_Fesc); Nion1 = ION_EFF_FACTOR*Nion_General(z1, astro_params->M_TURN/50., astro_params->M_TURN, astro_params->ALPHA_STAR, astro_params->ALPHA_ESC, 
astro_params->F_STAR10, astro_params->F_ESC10, Mlim_Fstar, Mlim_Fesc); } else { //set the minimum source mass if (astro_params->ION_Tvir_MIN < 9.99999e3) { // neutral IGM M_MIN_z0 = (float)TtoM(z0, astro_params->ION_Tvir_MIN, 1.22); M_MIN_z1 = (float)TtoM(z1, astro_params->ION_Tvir_MIN, 1.22); } else { // ionized IGM M_MIN_z0 = (float)TtoM(z0, astro_params->ION_Tvir_MIN, 0.6); M_MIN_z1 = (float)TtoM(z1, astro_params->ION_Tvir_MIN, 0.6); } if(M_MIN_z0 < M_MIN_z1) { if(user_params->FAST_FCOLL_TABLES){ initialiseSigmaMInterpTable(fmin(MMIN_FAST,M_MIN_z0),1e20); } else{ initialiseSigmaMInterpTable(M_MIN_z0,1e20); } } else { if(user_params->FAST_FCOLL_TABLES){ initialiseSigmaMInterpTable(fmin(MMIN_FAST,M_MIN_z1),1e20); } else{ initialiseSigmaMInterpTable(M_MIN_z1,1e20); } } Nion0 = ION_EFF_FACTOR*FgtrM_General(z0,M_MIN_z0); Nion1 = ION_EFF_FACTOR*FgtrM_General(z1,M_MIN_z1); freeSigmaMInterpTable(); } // With scale factor a, the above equation is written as dQ/da = n_{ion}/da - Q/t_{rec}*(dt/da) if (!global_params.RecombPhotonCons) { Q1 = Q0 + ((Nion0-Nion1)/2/delta_a)*da; // No Recombination } else { dadt = Ho*sqrt(cosmo_params_ps->OMm/a + global_params.OMr/a/a + cosmo_params_ps->OMl*a*a); // da/dt = Ho*a*sqrt(OMm/a^3 + OMr/a^4 + OMl) Trec = 0.93 * 1e9 * SperYR * pow(C_HII/3.,-1) * pow(T_0/2e4,0.7) * pow((1.+zi)/7.,-3); Q1 = Q0 + ((Nion0-Nion1)/2./delta_a - Q0/Trec/dadt)*da; } // Curve is no longer monotonically increasing, we are going to have to exit and start again if(Q1 < Q1_prev) { not_mono_increasing = 1; break; } zi_prev = zi; Q1_prev = Q1; z_arr[cnt] = zi; Q_arr[cnt] = Q1; cnt = cnt + 1; if (Q1 >= 1.0) break; // if fully ionized, stop here. // As the Q value increases, the bin size decreases gradually because more accurate calculation is required. if (da < 7e-5) da = 7e-5; // set minimum bin size. 
else da = pow(da,reduce_ratio); Q0 = Q1; a = a + da; } // A check to see if we ended up with a monotonically increasing function if(not_mono_increasing==0) { fail_condition = 0; } else { num_fails += 1; if(num_fails>10) { LOG_ERROR("Failed too many times."); // Throw ParameterError; Throw(PhotonConsError); } } } cnt = cnt - 1; istart = 0; for (i=1;i<cnt;i++){ if (Q_arr[i-1] == 0. && Q_arr[i] != 0.) istart = i-1; } nbin = cnt - istart; N_analytic = nbin; // initialise interploation Q as a function of z z_Q = calloc(nbin,sizeof(double)); Q_value = calloc(nbin,sizeof(double)); Q_at_z_spline_acc = gsl_interp_accel_alloc (); Q_at_z_spline = gsl_spline_alloc (gsl_interp_cspline, nbin); for (i=0; i<nbin; i++){ z_Q[i] = z_arr[cnt-i]; Q_value[i] = Q_arr[cnt-i]; } gsl_set_error_handler_off(); gsl_status = gsl_spline_init(Q_at_z_spline, z_Q, Q_value, nbin); GSL_ERROR(gsl_status); Zmin = z_Q[0]; Zmax = z_Q[nbin-1]; Qmin = Q_value[nbin-1]; Qmax = Q_value[0]; // initialise interpolation z as a function of Q double *Q_z = calloc(nbin,sizeof(double)); double *z_value = calloc(nbin,sizeof(double)); z_at_Q_spline_acc = gsl_interp_accel_alloc (); z_at_Q_spline = gsl_spline_alloc (gsl_interp_linear, nbin); for (i=0; i<nbin; i++){ Q_z[i] = Q_value[nbin-1-i]; z_value[i] = z_Q[nbin-1-i]; } gsl_status = gsl_spline_init(z_at_Q_spline, Q_z, z_value, nbin); GSL_ERROR(gsl_status); free(z_arr); free(Q_arr); if (flag_options->USE_MASS_DEPENDENT_ZETA) { freeSigmaMInterpTable; } LOG_DEBUG("Initialised PhotonCons."); } // End of try Catch(status){ return status; } return(0); } // Function to construct the spline for the calibration curve of the photon non-conservation int PhotonCons_Calibration(double *z_estimate, double *xH_estimate, int NSpline){ int status; Try{ if(xH_estimate[NSpline-1] > 0.0 && xH_estimate[NSpline-2] > 0.0 && xH_estimate[NSpline-3] > 0.0 && xH_estimate[0] <= global_params.PhotonConsStart) { initialise_NFHistory_spline(z_estimate,xH_estimate,NSpline); } } Catch(status){ return 
status; }
    return(0);
}

// Function callable from Python to know at which redshift to start sampling the calibration curve (to minimise function calls)
int ComputeZstart_PhotonCons(double *zstart) {

    int status;
    double temp;

    Try{
        if((1.-global_params.PhotonConsStart) > Qmax) {
            // It is possible that reionisation never even starts
            // Just need to arbitrarily set a high redshift to perform the algorithm
            temp = 20.;
        }
        else {
            z_at_Q(1. - global_params.PhotonConsStart,&(temp));
            // Increase the result by 10 per-cent to fix instances when this isn't high enough
            temp *= 1.1;
        }
    }
    Catch(status){
        return(status); // Use the status to determine if something went wrong.
    }

    *zstart = temp;
    return(0);
}

// Build deltaz(NF): the redshift offset between the calibrated (21cmFAST) and
// analytic reionisation histories, tabulated against neutral fraction, then
// extrapolated/smoothed and stored in the file-scope arrays used by the
// deltaz_spline_for_photoncons spline.
void determine_deltaz_for_photoncons() {

    int i, j, increasing_val, counter, smoothing_int;
    double temp;
    // NOTE(review): 'returned_value' appears unused in this function — candidate for removal.
    float z_cal, z_analytic, NF_sample, returned_value, NF_sample_min, gradient_analytic, z_analytic_at_endpoint, const_offset, z_analytic_2, smoothing_width;
    float bin_width, delta_NF, val1, val2, extrapolated_value;

    LOG_DEBUG("Determining deltaz for photon cons.");

    // Number of points for determine the delta z correction of the photon non-conservation
    N_NFsamples = 100;
    // Determine the change in neutral fraction to calculate the gradient for the linear extrapolation of the photon non-conservation correction
    delta_NF = 0.025;
    // A width (in neutral fraction data points) in which point we average over to try and avoid sharp features in the correction (removes some kinks)
    // Effectively acts as filtering step
    smoothing_width = 35.;

    // The photon non-conservation correction has a threshold (in terms of neutral fraction; global_params.PhotonConsEnd) for which we switch
    // from using the exact correction between the calibrated (21cmFAST all flag options off) to analytic expression to some extrapolation.
// This threshold is required due to the behaviour of 21cmFAST at very low neutral fractions, which cause extreme behaviour with recombinations on
    // A lot of the steps and choices are not completely robust, just chosen to smooth/average the data to have smoother resultant reionisation histories

    // Determine the number of extrapolated points required, if required at all.
    if(calibrated_NF_min < global_params.PhotonConsEnd) {
        // We require extrapolation, set minimum point to the threshold, and extrapolate beyond.
        NF_sample_min = global_params.PhotonConsEnd;

        // Determine the number of extrapolation points (to better smooth the correction) between the threshold (global_params.PhotonConsEnd) and a
        // point close to zero neutral fraction (set by global_params.PhotonConsAsymptoteTo)
        // Choice is to get the delta neutral fraction between extrapolated points to be similar to the cadence in the exact correction
        if(calibrated_NF_min > global_params.PhotonConsAsymptoteTo) {
            N_extrapolated = ((float)N_NFsamples - 1.)*(NF_sample_min - calibrated_NF_min)/( global_params.PhotonConsStart - NF_sample_min );
        }
        else {
            N_extrapolated = ((float)N_NFsamples - 1.)*(NF_sample_min - global_params.PhotonConsAsymptoteTo)/( global_params.PhotonConsStart - NF_sample_min );
        }
        N_extrapolated = (int)floor( N_extrapolated ) - 1; // Minus one as the zero point is added below
    }
    else {
        // No extrapolation required, neutral fraction never reaches zero
        NF_sample_min = calibrated_NF_min;
        N_extrapolated = 0;
    }

    // Determine the bin width for the sampling of the neutral fraction for the correction
    bin_width = ( global_params.PhotonConsStart - NF_sample_min )/((float)N_NFsamples - 1.);

    // allocate memory for arrays required to determine the photon non-conservation correction
    deltaz = calloc(N_NFsamples + N_extrapolated + 1,sizeof(double));
    deltaz_smoothed = calloc(N_NFsamples + N_extrapolated + 1,sizeof(double));
    NeutralFractions = calloc(N_NFsamples + N_extrapolated + 1,sizeof(double));

    // Go through and fill the data points (neutral fraction and corresponding delta z between the calibrated and analytic curves).
    for(i=0;i<N_NFsamples;i++) {

        NF_sample = NF_sample_min + bin_width*(float)i;

        // Determine redshift given a neutral fraction for the calibration curve
        z_at_NFHist(NF_sample,&(temp));
        z_cal = temp;

        // Determine redshift given a neutral fraction for the analytic curve
        z_at_Q(1. - NF_sample,&(temp));
        z_analytic = temp;

        // Index 0 (and any extrapolated slots) are reserved for the low-NF end-point added below.
        deltaz[i+1+N_extrapolated] = fabs( z_cal - z_analytic );
        NeutralFractions[i+1+N_extrapolated] = NF_sample;
    }

    // Determining the end-point (lowest neutral fraction) for the photon non-conservation correction
    if(calibrated_NF_min >= global_params.PhotonConsEnd) {

        increasing_val = 0;
        counter = 0;

        // Check if all the values of delta z are increasing
        for(i=0;i<(N_NFsamples-1);i++) {
            if(deltaz[i+1+N_extrapolated] >= deltaz[i+N_extrapolated]) {
                counter += 1;
            }
        }
        // If all the values of delta z are increasing, then some of the smoothing of the correction done below cannot be performed
        if(counter==(N_NFsamples-1)) {
            increasing_val = 1;
        }

        // Since we never have reionisation, need to set an appropriate end-point for the correction
        // Take some fraction of the previous point to determine the end-point
        NeutralFractions[0] = 0.999*NF_sample_min;
        if(increasing_val) {
            // Values of delta z are always increasing with decreasing neutral fraction thus make the last point slightly larger
            deltaz[0] = 1.001*deltaz[1];
        }
        else {
            // Values of delta z are always decreasing with decreasing neutral fraction thus make the last point slightly smaller
            deltaz[0] = 0.999*deltaz[1];
        }
    }
    else {

        // Ok, we are going to be extrapolating the photon non-conservation (delta z) beyond the threshold
        // Construct a linear curve for the analytic function to extrapolate to the new endpoint
        // The choice for doing so is to ensure the corrected reionisation history is mostly smooth, and doesn't
        // artificially result in kinks due to switching between how the delta z should be calculated
        z_at_Q(1. - (NeutralFractions[1+N_extrapolated] + delta_NF),&(temp));
        z_analytic = temp;

        z_at_Q(1. - NeutralFractions[1+N_extrapolated],&(temp));
        z_analytic_2 = temp;

        // determine the linear curve
        // Multiplication by 1.1 is arbitrary but effectively smooths out most kinks observed in the resultant corrected reionisation histories
        gradient_analytic = 1.1*( delta_NF )/( z_analytic - z_analytic_2 );
        const_offset = ( NeutralFractions[1+N_extrapolated] + delta_NF ) - gradient_analytic * z_analytic;

        // determine the extrapolation end point
        if(calibrated_NF_min > global_params.PhotonConsAsymptoteTo) {
            extrapolated_value = calibrated_NF_min;
        }
        else {
            extrapolated_value = global_params.PhotonConsAsymptoteTo;
        }

        // calculate the delta z for the extrapolated end point
        z_at_NFHist(extrapolated_value,&(temp));
        z_cal = temp;

        z_analytic_at_endpoint = ( extrapolated_value - const_offset )/gradient_analytic ;

        deltaz[0] = fabs( z_cal - z_analytic_at_endpoint );
        NeutralFractions[0] = extrapolated_value;

        // If performing extrapolation, add in all the extrapolated points between the end-point and the threshold to end the correction (global_params.PhotonConsEnd)
        for(i=0;i<N_extrapolated;i++) {
            if(calibrated_NF_min > global_params.PhotonConsAsymptoteTo) {
                NeutralFractions[i+1] = calibrated_NF_min + (NF_sample_min - calibrated_NF_min)*(float)(i+1)/((float)N_extrapolated + 1.);
            }
            else {
                NeutralFractions[i+1] = global_params.PhotonConsAsymptoteTo + (NF_sample_min - global_params.PhotonConsAsymptoteTo)*(float)(i+1)/((float)N_extrapolated + 1.);
            }
            // Linear interpolation of delta z between the end-point and the first exact sample.
            deltaz[i+1] = deltaz[0] + ( deltaz[1+N_extrapolated] - deltaz[0] )*(float)(i+1)/((float)N_extrapolated + 1.);
        }
    }

    // We have added the extrapolated values, now check if they are all increasing or not (again, to determine whether or not to try and smooth the corrected curve
    increasing_val = 0;
    counter = 0;

    for(i=0;i<(N_NFsamples-1);i++) {
        if(deltaz[i+1+N_extrapolated] >= deltaz[i+N_extrapolated]) {
            counter += 1;
        }
    }
    if(counter==(N_NFsamples-1)) {
        increasing_val = 1;
    }

    // For some models, the resultant delta z for extremely high neutral fractions ( > 0.95) seem to oscillate or sometimes drop in value.
    // This goes through and checks if this occurs, and tries to smooth this out
    // This doesn't occur very often, but can cause an artificial drop in the reionisation history (neutral fraction value) connecting the
    // values before/after the photon non-conservation correction starts.
    for(i=0;i<(N_NFsamples+N_extrapolated);i++) {

        val1 = deltaz[i];
        val2 = deltaz[i+1];

        counter = 0;

        // Check if we have a neutral fraction above 0.95, that the values are decreasing (val2 < val1), that we haven't sampled too many points (counter)
        // and that the NF_sample_min is less than around 0.8. That is, if a reasonable fraction of the reionisation history is sampled.
        while( NeutralFractions[i+1] > 0.95 && val2 < val1 && NF_sample_min < 0.8 && counter < 100) {

            NF_sample = global_params.PhotonConsStart - 0.001*(counter+1);

            // Determine redshift given a neutral fraction for the calibration curve
            z_at_NFHist(NF_sample,&(temp));
            z_cal = temp;

            // Determine redshift given a neutral fraction for the analytic curve
            z_at_Q(1. - NF_sample,&(temp));
            z_analytic = temp;

            // Determine the delta z
            val2 = fabs( z_cal - z_analytic );
            deltaz[i+1] = val2;
            counter += 1;

            // If after 100 samplings we couldn't get the value to increase (like it should), just modify it from the previous point.
            if(counter==100) {
                deltaz[i+1] = deltaz[i] * 1.01;
            }
        }
    }

    // Store the data in its intermediate state before averaging
    for(i=0;i<(N_NFsamples+N_extrapolated+1);i++) {
        deltaz_smoothed[i] = deltaz[i];
    }

    // If we are not increasing for all values, we can smooth out some features in delta z when connecting the extrapolated delta z values
    // compared to those from the exact correction (i.e. when we cross the threshold).
    if(!increasing_val) {

        for(i=0;i<(N_NFsamples+N_extrapolated);i++) {

            val1 = deltaz[0];
            val2 = deltaz[i+1];

            counter = 0;

            // Try and find a point which can be used to smooth out any dip in delta z as a function of neutral fraction.
            // It can be flat, then drop, then increase. This smooths over this drop (removes a kink in the resultant reionisation history).
            // Choice of 75 is somewhat arbitrary
            // NOTE(review): the '||' in this condition means the counter cap only binds while the
            // lookahead index stays in range; deltaz[i+1+counter] can also read past the last
            // filled element when (i+1+counter) exceeds N_NFsamples+N_extrapolated — confirm
            // whether '&&' and an explicit bounds check were intended.
            while(val2 < val1 && (counter < 75 || (1+(i+1)+counter) > (N_NFsamples+N_extrapolated))) {

                counter += 1;
                val2 = deltaz[i+1+counter];

                deltaz_smoothed[i+1] = ( val1 + deltaz[1+(i+1)+counter] )/2.;
            }
            if(counter==75 || (1+(i+1)+counter) > (N_NFsamples+N_extrapolated)) {
                deltaz_smoothed[i+1] = deltaz[i+1];
            }
        }
    }

    // Here we effectively filter over the delta z as a function of neutral fraction to try and minimise any possible kinks etc. in the functional curve.
    for(i=0;i<(N_NFsamples+N_extrapolated+1);i++) {

        // We are at the end-points, cannot smooth
        if(i==0 || i==(N_NFsamples+N_extrapolated)) {
            deltaz[i] = deltaz_smoothed[i];
        }
        else {

            deltaz[i] = 0.;

            // We are symmetrically smoothing, making sure we have the same number of data points either side of the point we are filtering over
            // This determines the filter width when close to the edge of the data ranges
            if( (i - (int)floor(smoothing_width/2.) ) < 0) {
                smoothing_int = 2*( i ) + (int)((int)smoothing_width%2);
            }
            else if( (i - (int)floor(smoothing_width/2.) + ((int)smoothing_width - 1) ) > (N_NFsamples + N_extrapolated) ) {
                smoothing_int = ((int)smoothing_width - 1) - 2*((i - (int)floor(smoothing_width/2.) + ((int)smoothing_width - 1) ) - (N_NFsamples + N_extrapolated) ) + (int)((int)smoothing_width%2);
            }
            else {
                smoothing_int = (int)smoothing_width;
            }

            // Average (filter) over the delta z values to smooth the result
            // NOTE(review): the upper bound on the continuation of this condition permits an
            // index one past the last array element; the 'counter < smoothing_int' guard
            // usually prevents the read — verify the intended bound is N_NFsamples+N_extrapolated.
            counter = 0;
            for(j=0;j<(int)smoothing_width;j++) {
                if(((i - (int)floor((float)smoothing_int/2.) + j)>=0) && ((i - (int)floor((float)smoothing_int/2.)
+ j) <= (N_NFsamples + N_extrapolated + 1)) && counter < smoothing_int ) { deltaz[i] += deltaz_smoothed[i - (int)floor((float)smoothing_int/2.) + j]; counter += 1; } } deltaz[i] /= (float)counter; } } N_deltaz = N_NFsamples + N_extrapolated + 1; // Now, we can construct the spline of the photon non-conservation correction (delta z as a function of neutral fraction) deltaz_spline_for_photoncons_acc = gsl_interp_accel_alloc (); deltaz_spline_for_photoncons = gsl_spline_alloc (gsl_interp_linear, N_NFsamples + N_extrapolated + 1); gsl_set_error_handler_off(); int gsl_status; gsl_status = gsl_spline_init(deltaz_spline_for_photoncons, NeutralFractions, deltaz, N_NFsamples + N_extrapolated + 1); GSL_ERROR(gsl_status); } float adjust_redshifts_for_photoncons( struct AstroParams *astro_params, struct FlagOptions *flag_options, float *redshift, float *stored_redshift, float *absolute_delta_z ) { int i, new_counter; double temp; float required_NF, adjusted_redshift, future_z, gradient_extrapolation, const_extrapolation, temp_redshift, check_required_NF; LOG_DEBUG("Adjusting redshifts for photon cons."); if(*redshift < global_params.PhotonConsEndCalibz) { LOG_ERROR( "You have passed a redshift (z = %f) that is lower than the enpoint of the photon non-conservation correction "\ "(global_params.PhotonConsEndCalibz = %f). 
If this behaviour is desired then set global_params.PhotonConsEndCalibz "\ "to a value lower than z = %f.",*redshift,global_params.PhotonConsEndCalibz,*redshift ); // Throw(ParameterError); Throw(PhotonConsError); } // Determine the neutral fraction (filling factor) of the analytic calibration expression given the current sampled redshift Q_at_z(*redshift, &(temp)); required_NF = 1.0 - (float)temp; // Find which redshift we need to sample in order for the calibration reionisation history to match the analytic expression if(required_NF > global_params.PhotonConsStart) { // We haven't started ionising yet, so keep redshifts the same adjusted_redshift = *redshift; *absolute_delta_z = 0.; } else if(required_NF<=global_params.PhotonConsEnd) { // We have gone beyond the threshold for the end of the photon non-conservation correction // Deemed to be roughly where the calibration curve starts to approach the analytic expression if(FirstNF_Estimate <= 0. && required_NF <= 0.0) { // Reionisation has already happened well before the calibration adjusted_redshift = *redshift; } else { // We have crossed the NF threshold for the photon conservation correction so now set to the delta z at the threshold if(required_NF < global_params.PhotonConsAsymptoteTo) { // This counts the number of times we have exceeded the extrapolated point and attempts to modify the delta z // to try and make the function a little smoother *absolute_delta_z = gsl_spline_eval(deltaz_spline_for_photoncons, global_params.PhotonConsAsymptoteTo, deltaz_spline_for_photoncons_acc); new_counter = 0; temp_redshift = *redshift; check_required_NF = required_NF; // Ok, find when in the past we exceeded the asymptote threshold value using the global_params.ZPRIME_STEP_FACTOR // In doing it this way, co-eval boxes will be the same as lightcone boxes with regard to redshift sampling while( check_required_NF < global_params.PhotonConsAsymptoteTo ) { temp_redshift = ((1. 
+ temp_redshift)*global_params.ZPRIME_STEP_FACTOR - 1.); Q_at_z(temp_redshift, &(temp)); check_required_NF = 1.0 - (float)temp; new_counter += 1; } // Now adjust the final delta_z by some amount to smooth if over successive steps if(deltaz[1] > deltaz[0]) { *absolute_delta_z = pow( 0.96 , (new_counter - 1) + 1. ) * ( *absolute_delta_z ); } else { *absolute_delta_z = pow( 1.04 , (new_counter - 1) + 1. ) * ( *absolute_delta_z ); } // Check if we go into the future (z < 0) and avoid it adjusted_redshift = (*redshift) - (*absolute_delta_z); if(adjusted_redshift < 0.0) { adjusted_redshift = 0.0; } } else { *absolute_delta_z = gsl_spline_eval(deltaz_spline_for_photoncons, required_NF, deltaz_spline_for_photoncons_acc); adjusted_redshift = (*redshift) - (*absolute_delta_z); } } } else { // Initialise the photon non-conservation correction curve if(!photon_cons_allocated) { determine_deltaz_for_photoncons(); photon_cons_allocated = true; } // We have exceeded even the end-point of the extrapolation // Just smooth ever subsequent point // Note that this is deliberately tailored to light-cone quantites, but will still work with co-eval cubes // Though might produce some very minor discrepancies when comparing outputs. if(required_NF < NeutralFractions[0]) { new_counter = 0; temp_redshift = *redshift; check_required_NF = required_NF; // Ok, find when in the past we exceeded the asymptote threshold value using the global_params.ZPRIME_STEP_FACTOR // In doing it this way, co-eval boxes will be the same as lightcone boxes with regard to redshift sampling while( check_required_NF < NeutralFractions[0] ) { temp_redshift = ((1. + temp_redshift)*global_params.ZPRIME_STEP_FACTOR - 1.); Q_at_z(temp_redshift, &(temp)); check_required_NF = 1.0 - (float)temp; new_counter += 1; } if(new_counter > 5) { LOG_WARNING( "The photon non-conservation correction has employed an extrapolation for\n"\ "more than 5 consecutive snapshots. This can be unstable, thus please check "\ "resultant history. 
Parameters are:\n" ); #if LOG_LEVEL >= LOG_WARNING writeAstroParams(flag_options, astro_params); #endif } // Now adjust the final delta_z by some amount to smooth if over successive steps if(deltaz[1] > deltaz[0]) { *absolute_delta_z = pow( 0.998 , (new_counter - 1) + 1. ) * ( *absolute_delta_z ); } else { *absolute_delta_z = pow( 1.002 , (new_counter - 1) + 1. ) * ( *absolute_delta_z ); } // Check if we go into the future (z < 0) and avoid it adjusted_redshift = (*redshift) - (*absolute_delta_z); if(adjusted_redshift < 0.0) { adjusted_redshift = 0.0; } } else { // Find the corresponding redshift for the calibration curve given the required neutral fraction (filling factor) from the analytic expression *absolute_delta_z = gsl_spline_eval(deltaz_spline_for_photoncons, (double)required_NF, deltaz_spline_for_photoncons_acc); adjusted_redshift = (*redshift) - (*absolute_delta_z); } } // keep the original sampled redshift *stored_redshift = *redshift; // This redshift snapshot now uses the modified redshift following the photon non-conservation correction *redshift = adjusted_redshift; } void Q_at_z(double z, double *splined_value){ float returned_value; if (z >= Zmax) { *splined_value = 0.; } else if (z <= Zmin) { *splined_value = 1.; } else { returned_value = gsl_spline_eval(Q_at_z_spline, z, Q_at_z_spline_acc); *splined_value = returned_value; } } void z_at_Q(double Q, double *splined_value){ float returned_value; if (Q < Qmin) { LOG_ERROR("The minimum value of Q is %.4e",Qmin); // Throw(ParameterError); Throw(PhotonConsError); } else if (Q > Qmax) { LOG_ERROR("The maximum value of Q is %.4e. Reionization ends at ~%.4f.",Qmax,Zmin); LOG_ERROR("This error can occur if global_params.PhotonConsEndCalibz is close to "\ "the final sampled redshift. 
One can consider a lower value for "\ "global_params.PhotonConsEndCalibz to mitigate this"); // Throw(ParameterError); Throw(PhotonConsError); } else { returned_value = gsl_spline_eval(z_at_Q_spline, Q, z_at_Q_spline_acc); *splined_value = returned_value; } } void free_Q_value() { gsl_spline_free (Q_at_z_spline); gsl_interp_accel_free (Q_at_z_spline_acc); gsl_spline_free (z_at_Q_spline); gsl_interp_accel_free (z_at_Q_spline_acc); } void initialise_NFHistory_spline(double *redshifts, double *NF_estimate, int NSpline){ int i, counter, start_index, found_start_index; // This takes in the data for the calibration curve for the photon non-conservation correction counter = 0; start_index = 0; found_start_index = 0; FinalNF_Estimate = NF_estimate[0]; FirstNF_Estimate = NF_estimate[NSpline-1]; // Determine the point in the data where its no longer zero (basically to avoid too many zeros in the spline) for(i=0;i<NSpline-1;i++) { if(NF_estimate[i+1] > NF_estimate[i]) { if(found_start_index == 0) { start_index = i; found_start_index = 1; } } counter += 1; } counter = counter - start_index; N_calibrated = (counter+1); // Store the data points for determining the photon non-conservation correction nf_vals = calloc((counter+1),sizeof(double)); z_vals = calloc((counter+1),sizeof(double)); calibrated_NF_min = 1.; // Store the data, and determine the end point of the input data for estimating the extrapolated results for(i=0;i<(counter+1);i++) { nf_vals[i] = NF_estimate[start_index+i]; z_vals[i] = redshifts[start_index+i]; // At the extreme high redshift end, there can be numerical issues with the solution of the analytic expression if(i>0) { while(nf_vals[i] <= nf_vals[i-1]) { nf_vals[i] += 0.000001; } } if(nf_vals[i] < calibrated_NF_min) { calibrated_NF_min = nf_vals[i]; } } NFHistory_spline_acc = gsl_interp_accel_alloc (); // NFHistory_spline = gsl_spline_alloc (gsl_interp_cspline, (counter+1)); NFHistory_spline = gsl_spline_alloc (gsl_interp_linear, (counter+1)); 
gsl_set_error_handler_off(); int gsl_status; gsl_status = gsl_spline_init(NFHistory_spline, nf_vals, z_vals, (counter+1)); GSL_ERROR(gsl_status); z_NFHistory_spline_acc = gsl_interp_accel_alloc (); // z_NFHistory_spline = gsl_spline_alloc (gsl_interp_cspline, (counter+1)); z_NFHistory_spline = gsl_spline_alloc (gsl_interp_linear, (counter+1)); gsl_status = gsl_spline_init(z_NFHistory_spline, z_vals, nf_vals, (counter+1)); GSL_ERROR(gsl_status); } void z_at_NFHist(double xHI_Hist, double *splined_value){ float returned_value; returned_value = gsl_spline_eval(NFHistory_spline, xHI_Hist, NFHistory_spline_acc); *splined_value = returned_value; } void NFHist_at_z(double z, double *splined_value){ float returned_value; returned_value = gsl_spline_eval(z_NFHistory_spline, z, NFHistory_spline_acc); *splined_value = returned_value; } int ObtainPhotonConsData( double *z_at_Q_data, double *Q_data, int *Ndata_analytic, double *z_cal_data, double *nf_cal_data, int *Ndata_calibration, double *PhotonCons_NFdata, double *PhotonCons_deltaz, int *Ndata_PhotonCons) { int i; *Ndata_analytic = N_analytic; *Ndata_calibration = N_calibrated; *Ndata_PhotonCons = N_deltaz; for(i=0;i<N_analytic;i++) { z_at_Q_data[i] = z_Q[i]; Q_data[i] = Q_value[i]; } for(i=0;i<N_calibrated;i++) { z_cal_data[i] = z_vals[i]; nf_cal_data[i] = nf_vals[i]; } for(i=0;i<N_deltaz;i++) { PhotonCons_NFdata[i] = NeutralFractions[i]; PhotonCons_deltaz[i] = deltaz[i]; } return(0); } void FreePhotonConsMemory() { LOG_DEBUG("Freeing some photon cons memory."); free(deltaz); free(deltaz_smoothed); free(NeutralFractions); free(z_Q); free(Q_value); free(nf_vals); free(z_vals); free_Q_value(); gsl_spline_free (NFHistory_spline); gsl_interp_accel_free (NFHistory_spline_acc); gsl_spline_free (z_NFHistory_spline); gsl_interp_accel_free (z_NFHistory_spline_acc); gsl_spline_free (deltaz_spline_for_photoncons); gsl_interp_accel_free (deltaz_spline_for_photoncons_acc); LOG_DEBUG("Done Freeing photon cons memory."); 
photon_cons_allocated = false; } void FreeTsInterpolationTables(struct FlagOptions *flag_options) { LOG_DEBUG("Freeing some interpolation table memory."); freeSigmaMInterpTable(); if (flag_options->USE_MASS_DEPENDENT_ZETA) { free(z_val); z_val = NULL; free(Nion_z_val); free(z_X_val); z_X_val = NULL; free(SFRD_val); if (flag_options->USE_MINI_HALOS){ free(Nion_z_val_MINI); free(SFRD_val_MINI); } } else{ free(FgtrM_1DTable_linear); } LOG_DEBUG("Done Freeing interpolation table memory."); interpolation_tables_allocated = false; }
correlation.c
/** * This version is stamped on May 10, 2016 * * Contact: * Louis-Noel Pouchet <pouchet.ohio-state.edu> * Tomofumi Yuki <tomofumi.yuki.fr> * * Web address: http://polybench.sourceforge.net */ /* correlation.c: this file is part of PolyBench/C */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ #include "correlation.h" /* Array initialization. */ static void init_array (int m, int n, DATA_TYPE *float_n, DATA_TYPE POLYBENCH_2D(data, N, M, n, m)) { int i, j; *float_n = (DATA_TYPE)N; for (i = 0; i < N; i++) for (j = 0; j < M; j++) data[i][j] = (DATA_TYPE)(i * j) / M + i; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(corr, M, M, m, m)) { int i, j; POLYBENCH_DUMP_START; POLYBENCH_DUMP_BEGIN("corr"); for (i = 0; i < m; i++) for (j = 0; j < m; j++) { if ((i * m + j) % 20 == 0) fprintf (POLYBENCH_DUMP_TARGET, "\n"); fprintf (POLYBENCH_DUMP_TARGET, DATA_PRINTF_MODIFIER, corr[i][j]); } POLYBENCH_DUMP_END("corr"); POLYBENCH_DUMP_FINISH; } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ static void kernel_correlation(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data, N, M, n, m), DATA_TYPE POLYBENCH_2D(corr, M, M, m, m), DATA_TYPE POLYBENCH_1D(mean, M, m), DATA_TYPE POLYBENCH_1D(stddev, M, m)) { int i, j, k; DATA_TYPE eps = SCALAR_VAL(0.1); #pragma omp parallel for default(shared) private(j, i) firstprivate(m, n, float_n, data) for (j = 0; j < _PB_M; j++) { mean[j] = SCALAR_VAL(0.0); for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } #pragma omp parallel for default(shared) private(j, i) firstprivate(m, n, float_n, eps, data, mean) for (j = 0; j < _PB_M; j++) { stddev[j] = SCALAR_VAL(0.0); for (i = 0; i < _PB_N; i++) stddev[j] += (data[i][j] - mean[j]) * (data[i][j] - mean[j]); stddev[j] /= float_n; stddev[j] = SQRT_FUN(stddev[j]); stddev[j] = stddev[j] <= eps ? SCALAR_VAL(1.0) : stddev[j]; } #pragma omp parallel for default(shared) private(i, j) firstprivate(n, m, float_n, mean, stddev) for (i = 0; i < _PB_N; i++) { for (j = 0; j < _PB_M; j++) { data[i][j] -= mean[j]; data[i][j] /= SQRT_FUN(float_n) * stddev[j]; } } #pragma omp parallel for default(shared) private(i, j, k) firstprivate(m, n, data) for (i = 0; i < _PB_M - 1; i++) { corr[i][i] = SCALAR_VAL(1.0); for (j = i + 1; j < _PB_M; j++) { corr[i][j] = SCALAR_VAL(0.0); for (k = 0; k < _PB_N; k++) corr[i][j] += (data[k][i] * data[k][j]); corr[j][i] = corr[i][j]; } } corr[_PB_M - 1][_PB_M - 1] = SCALAR_VAL(1.0); } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data, DATA_TYPE, N, M, n, m); POLYBENCH_2D_ARRAY_DECL(corr, DATA_TYPE, M, M, m, m); POLYBENCH_1D_ARRAY_DECL(mean, DATA_TYPE, M, m); POLYBENCH_1D_ARRAY_DECL(stddev, DATA_TYPE, M, m); /* Initialize array(s). */ init_array (m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. 
*/ kernel_correlation (m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(corr), POLYBENCH_ARRAY(mean), POLYBENCH_ARRAY(stddev)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(corr))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(corr); POLYBENCH_FREE_ARRAY(mean); POLYBENCH_FREE_ARRAY(stddev); return 0; }
test-float-libmvec-sincosf-main.c
/* Test for vector sincosf ABI. Copyright (C) 2016-2020 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <https://www.gnu.org/licenses/>. */ #include <math.h> #define N 1000 float x[N], s[N], c[N]; float *s_ptrs[N]; float *c_ptrs[N]; int test_sincosf_abi (void) { int i; for(i = 0; i < N; i++) { x[i] = i / 3; s_ptrs[i] = &s[i]; c_ptrs[i] = &c[i]; } #pragma omp simd for(i = 0; i < N; i++) sincosf (x[i], s_ptrs[i], c_ptrs[i]); return 0; }
GB_cumsum.c
//------------------------------------------------------------------------------
// GB_cumsum: cumulative sum of an array
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// Compute the cumulative sum of an array count[0:n], of size n+1
// in pseudo-MATLAB notation:

//      k = sum (count [0:n-1] != 0) ;

//      count = cumsum ([0 count[0:n-1]]) ;

// That is, count [j] on input is overwritten with the value of
// sum (count [0..j-1]).  count [n] is implicitly zero on input.
// On output, count [n] is the total sum.

// The parallel version is a classic two-pass exclusive scan: pass 1 computes
// each task's partial sum, pass 2 offsets each task by the sums of the tasks
// before it and scans its slice in place.

#include "GB.h"

// TODO for GPU: add error handling and GrB_Info return value

GB_PUBLIC                       // accessed by the MATLAB tests in GraphBLAS/Test only
void GB_cumsum                  // cumulative sum of an array
(
    int64_t *GB_RESTRICT count, // size n+1, input/output
    const int64_t n,
    int64_t *GB_RESTRICT kresult,   // return k, if needed by the caller
    int nthreads
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (count != NULL) ;
    ASSERT (n >= 0) ;

    //--------------------------------------------------------------------------
    // determine # of threads to use
    //--------------------------------------------------------------------------

    #if !defined ( _OPENMP )
    nthreads = 1 ;
    #endif

    if (nthreads > 1)
    {
        // cap the thread count so each task gets at least ~1024 entries
        nthreads = GB_IMIN (nthreads, n / 1024) ;
        nthreads = GB_IMAX (nthreads, 1) ;
    }

    //--------------------------------------------------------------------------
    // count = cumsum ([0 count[0:n-1]]) ;
    //--------------------------------------------------------------------------

    if (kresult == NULL)
    {

        #if 0
        // FIXME
        #if defined ( GBCUDA )
        if (GB_cuda_is_on_GPU (count))
        {
            // 'count' is already on the GPU: compute the cumulative sum there
            GB_cuda_cumsum (count, n) ;
        }
        else
        #endif
        #endif
        if (nthreads <= 2)
        {

            //------------------------------------------------------------------
            // cumsum with one thread
            //------------------------------------------------------------------

            // sequential exclusive scan: count [i] becomes the sum of all
            // entries strictly before i
            int64_t s = 0 ;
            for (int64_t i = 0 ; i < n ; i++)
            {
                int64_t c = count [i] ;
                count [i] = s ;
                s += c ;
            }
            count [n] = s ;

        }
        else
        {

            //------------------------------------------------------------------
            // cumsum with multiple threads
            //------------------------------------------------------------------

            // allocate workspace
            int64_t *ws = GB_MALLOC (nthreads, int64_t) ;
            if (ws == NULL)
            {
                // out of memory; use a single thread instead
                GB_cumsum (count, n, NULL, 1) ;
                return ;
            }

            int tid ;
            // pass 1: per-task partial sums
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (tid = 0 ; tid < nthreads ; tid++)
            {
                // each task sums up its own part
                int64_t istart, iend ;
                GB_PARTITION (istart, iend, n, tid, nthreads) ;
                int64_t s = 0 ;
                for (int64_t i = istart ; i < iend ; i++)
                {
                    s += count [i] ;
                }
                ws [tid] = s ;
            }

            // pass 2: offset each task by the earlier tasks' sums, then scan
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (tid = 0 ; tid < nthreads ; tid++)
            {
                // each task computes the cumsum of its own part
                int64_t istart, iend ;
                GB_PARTITION (istart, iend, n, tid, nthreads) ;
                int64_t s = 0 ;
                for (int i = 0 ; i < tid ; i++)
                {
                    s += ws [i] ;
                }
                for (int64_t i = istart ; i < iend ; i++)
                {
                    int64_t c = count [i] ;
                    count [i] = s ;
                    s += c ;
                }
                // the last task writes the grand total into count [n]
                if (iend == n)
                {
                    count [n] = s ;
                }
            }

            // free workspace
            GB_FREE (ws) ;
        }

    }
    else
    {

        #if 0
        // TODO for GPU: pop count on the GPU for GB_cumsum
        #if defined ( GBCUDA )
        if (GB_cuda_is_on_GPU (count))
        {
            // 'count' is already on the GPU: compute the cumulative sum there
            (*kresult) = GB_cuda_population_count_int64 (count, n) ;
            GB_cuda_cumsum (count, n) ;
        }
        else
        #endif
        #endif
        if (nthreads <= 2)
        {

            //------------------------------------------------------------------
            // cumsum with one thread, also compute k
            //------------------------------------------------------------------

            // same scan as above, but also count the nonzero input entries
            int64_t k = 0 ;
            int64_t s = 0 ;
            for (int64_t i = 0 ; i < n ; i++)
            {
                int64_t c = count [i] ;
                if (c != 0) k++ ;
                count [i] = s ;
                s += c ;
            }
            count [n] = s ;
            (*kresult) = k ;

        }
        else
        {

            //------------------------------------------------------------------
            // cumsum with multiple threads, also compute k
            //------------------------------------------------------------------

            // single workspace of size 2*nthreads: ws holds partial sums,
            // wk holds per-task nonzero counts
            int64_t *ws = GB_MALLOC (2*nthreads, int64_t) ;
            if (ws == NULL)
            {
                // out of memory; use a single thread instead
                GB_cumsum (count, n, kresult, 1) ;
                return ;
            }
            int64_t *wk = ws + nthreads ;

            int tid ;
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (tid = 0 ; tid < nthreads ; tid++)
            {
                // each task sums up its own part
                int64_t istart, iend ;
                GB_PARTITION (istart, iend, n, tid, nthreads) ;
                int64_t k = 0 ;
                int64_t s = 0 ;
                for (int64_t i = istart ; i < iend ; i++)
                {
                    int64_t c = count [i] ;
                    if (c != 0) k++ ;
                    s += c ;
                }
                ws [tid] = s ;
                wk [tid] = k ;
            }

            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (tid = 0 ; tid < nthreads ; tid++)
            {
                // each task computes the cumsum of its own part
                int64_t istart, iend ;
                GB_PARTITION (istart, iend, n, tid, nthreads) ;
                int64_t s = 0 ;
                for (int i = 0 ; i < tid ; i++)
                {
                    s += ws [i] ;
                }
                for (int64_t i = istart ; i < iend ; i++)
                {
                    int64_t c = count [i] ;
                    count [i] = s ;
                    s += c ;
                }
                if (iend == n)
                {
                    count [n] = s ;
                }
            }

            // reduce the per-task nonzero counts into k
            int64_t k = 0 ;
            for (int tid = 0 ; tid < nthreads ; tid++)
            {
                k += wk [tid] ;
            }
            (*kresult) = k ;

            // free workspace
            GB_FREE (ws) ;
        }
    }
}
pr51339.c
/* PR c/51339 */
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */

/* Compile-only regression test for GCC PR c/51339: firstprivate and
   lastprivate of a file-scope array on a parallel sections construct
   must be accepted.  The body only needs to compile; it is never run.  */

char g[] = "g";

void
foo (void)
{
  /* g (a 2-byte char array) is privatized on entry and copied back to
     the original on exit of the sections region.  */
#pragma omp parallel sections firstprivate (g) lastprivate (g)
  {
#pragma omp section
    g[0] = 'h';
  }
}
convolution_sgemm_pack8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void im2col_sgemm_pack8_avx(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { // Mat bottom_im2col(size, maxk, inch, 32u, 8, opt.workspace_allocator); const int size = bottom_im2col.w; const int maxk = bottom_im2col.h; const int inch = bottom_im2col.c; const int outch = top_blob.c; const float* bias = _bias; // permute Mat tmp; if (size >= 12) tmp.create(12 * maxk, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + (size % 12 % 4) / 2 + size % 12 % 2, 32u, 8, opt.workspace_allocator); else if (size >= 8) tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 32u, 8, opt.workspace_allocator); else if (size >= 4) tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 32u, 8, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch, size / 2 + size % 2, 32u, 8, opt.workspace_allocator); else tmp.create(maxk, inch, size, 32u, 8, opt.workspace_allocator); { int nn_size = size / 12; int remain_size_start = 0; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 12; float* tmpptr = tmp.channel(i / 12); for (int q = 0; q < inch; q++) { const float* img0 = (const 
float*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { // transpose 8x12 __m256 _r0 = _mm256_load_ps(img0); __m256 _r1 = _mm256_load_ps(img0 + 8); __m256 _r2 = _mm256_load_ps(img0 + 8 * 2); __m256 _r3 = _mm256_load_ps(img0 + 8 * 3); __m256 _r4 = _mm256_load_ps(img0 + 8 * 4); __m256 _r5 = _mm256_load_ps(img0 + 8 * 5); __m256 _r6 = _mm256_load_ps(img0 + 8 * 6); __m256 _r7 = _mm256_load_ps(img0 + 8 * 7); __m256 _r8 = _mm256_load_ps(img0 + 8 * 8); __m256 _r9 = _mm256_load_ps(img0 + 8 * 9); __m256 _ra = _mm256_load_ps(img0 + 8 * 10); __m256 _rb = _mm256_load_ps(img0 + 8 * 11); __m256 _tmp0 = _mm256_unpacklo_ps(_r0, _r1); __m256 _tmp1 = _mm256_unpackhi_ps(_r0, _r1); __m256 _tmp2 = _mm256_unpacklo_ps(_r2, _r3); __m256 _tmp3 = _mm256_unpackhi_ps(_r2, _r3); __m256 _tmp4 = _mm256_unpacklo_ps(_r4, _r5); __m256 _tmp5 = _mm256_unpackhi_ps(_r4, _r5); __m256 _tmp6 = _mm256_unpacklo_ps(_r6, _r7); __m256 _tmp7 = _mm256_unpackhi_ps(_r6, _r7); __m256 _tmp8 = _mm256_unpacklo_ps(_r8, _r9); __m256 _tmp9 = _mm256_unpackhi_ps(_r8, _r9); __m256 _tmpa = _mm256_unpacklo_ps(_ra, _rb); __m256 _tmpb = _mm256_unpackhi_ps(_ra, _rb); __m256 _tmpc = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmpd = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2)); __m256 _tmpe = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmpf = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2)); __m256 _tmpg = _mm256_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmph = _mm256_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2)); __m256 _tmpi = _mm256_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmpj = _mm256_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2)); __m256 _tmpk = _mm256_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmpl = _mm256_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(3, 2, 3, 2)); __m256 _tmpm = _mm256_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmpn = _mm256_shuffle_ps(_tmp9, _tmpb, 
_MM_SHUFFLE(3, 2, 3, 2)); _r0 = _mm256_permute2f128_ps(_tmpc, _tmpg, _MM_SHUFFLE(0, 2, 0, 0)); _r1 = _mm256_permute2f128_ps(_tmpk, _tmpd, _MM_SHUFFLE(0, 2, 0, 0)); _r2 = _mm256_permute2f128_ps(_tmph, _tmpl, _MM_SHUFFLE(0, 2, 0, 0)); _r3 = _mm256_permute2f128_ps(_tmpe, _tmpi, _MM_SHUFFLE(0, 2, 0, 0)); _r4 = _mm256_permute2f128_ps(_tmpm, _tmpf, _MM_SHUFFLE(0, 2, 0, 0)); _r5 = _mm256_permute2f128_ps(_tmpj, _tmpn, _MM_SHUFFLE(0, 2, 0, 0)); _r6 = _mm256_permute2f128_ps(_tmpc, _tmpg, _MM_SHUFFLE(0, 3, 0, 1)); _r7 = _mm256_permute2f128_ps(_tmpk, _tmpd, _MM_SHUFFLE(0, 3, 0, 1)); _r8 = _mm256_permute2f128_ps(_tmph, _tmpl, _MM_SHUFFLE(0, 3, 0, 1)); _r9 = _mm256_permute2f128_ps(_tmpe, _tmpi, _MM_SHUFFLE(0, 3, 0, 1)); _ra = _mm256_permute2f128_ps(_tmpm, _tmpf, _MM_SHUFFLE(0, 3, 0, 1)); _rb = _mm256_permute2f128_ps(_tmpj, _tmpn, _MM_SHUFFLE(0, 3, 0, 1)); _mm256_store_ps(tmpptr, _r0); _mm256_store_ps(tmpptr + 8, _r1); _mm256_store_ps(tmpptr + 8 * 2, _r2); _mm256_store_ps(tmpptr + 8 * 3, _r3); _mm256_store_ps(tmpptr + 8 * 4, _r4); _mm256_store_ps(tmpptr + 8 * 5, _r5); _mm256_store_ps(tmpptr + 8 * 6, _r6); _mm256_store_ps(tmpptr + 8 * 7, _r7); _mm256_store_ps(tmpptr + 8 * 8, _r8); _mm256_store_ps(tmpptr + 8 * 9, _r9); _mm256_store_ps(tmpptr + 8 * 10, _ra); _mm256_store_ps(tmpptr + 8 * 11, _rb); img0 += size * 8; tmpptr += 96; } } } remain_size_start += nn_size * 12; nn_size = (size - remain_size_start) >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 8; float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); for (int q = 0; q < inch; q++) { const float* img0 = (const float*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { // transpose 8x8 __m256 _r0 = _mm256_load_ps(img0); __m256 _r1 = _mm256_load_ps(img0 + 8); __m256 _r2 = _mm256_load_ps(img0 + 8 * 2); __m256 _r3 = _mm256_load_ps(img0 + 8 * 3); __m256 _r4 = _mm256_load_ps(img0 + 8 * 4); __m256 _r5 = _mm256_load_ps(img0 + 8 * 5); 
__m256 _r6 = _mm256_load_ps(img0 + 8 * 6); __m256 _r7 = _mm256_load_ps(img0 + 8 * 7); __m256 _tmp0 = _mm256_unpacklo_ps(_r0, _r1); __m256 _tmp1 = _mm256_unpackhi_ps(_r0, _r1); __m256 _tmp2 = _mm256_unpacklo_ps(_r2, _r3); __m256 _tmp3 = _mm256_unpackhi_ps(_r2, _r3); __m256 _tmp4 = _mm256_unpacklo_ps(_r4, _r5); __m256 _tmp5 = _mm256_unpackhi_ps(_r4, _r5); __m256 _tmp6 = _mm256_unpacklo_ps(_r6, _r7); __m256 _tmp7 = _mm256_unpackhi_ps(_r6, _r7); __m256 _tmp8 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmp9 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2)); __m256 _tmpa = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmpb = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2)); __m256 _tmpc = _mm256_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmpd = _mm256_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2)); __m256 _tmpe = _mm256_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmpf = _mm256_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2)); _r0 = _mm256_permute2f128_ps(_tmp8, _tmpc, _MM_SHUFFLE(0, 2, 0, 0)); _r1 = _mm256_permute2f128_ps(_tmp9, _tmpd, _MM_SHUFFLE(0, 2, 0, 0)); _r2 = _mm256_permute2f128_ps(_tmpa, _tmpe, _MM_SHUFFLE(0, 2, 0, 0)); _r3 = _mm256_permute2f128_ps(_tmpb, _tmpf, _MM_SHUFFLE(0, 2, 0, 0)); _r4 = _mm256_permute2f128_ps(_tmp8, _tmpc, _MM_SHUFFLE(0, 3, 0, 1)); _r5 = _mm256_permute2f128_ps(_tmp9, _tmpd, _MM_SHUFFLE(0, 3, 0, 1)); _r6 = _mm256_permute2f128_ps(_tmpa, _tmpe, _MM_SHUFFLE(0, 3, 0, 1)); _r7 = _mm256_permute2f128_ps(_tmpb, _tmpf, _MM_SHUFFLE(0, 3, 0, 1)); _mm256_store_ps(tmpptr, _r0); _mm256_store_ps(tmpptr + 8, _r1); _mm256_store_ps(tmpptr + 8 * 2, _r2); _mm256_store_ps(tmpptr + 8 * 3, _r3); _mm256_store_ps(tmpptr + 8 * 4, _r4); _mm256_store_ps(tmpptr + 8 * 5, _r5); _mm256_store_ps(tmpptr + 8 * 6, _r6); _mm256_store_ps(tmpptr + 8 * 7, _r7); img0 += size * 8; tmpptr += 64; } } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #pragma 
omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); for (int q = 0; q < inch; q++) { const float* img0 = (const float*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { // transpose 8x4 __m256 _r0 = _mm256_load_ps(img0); __m256 _r1 = _mm256_load_ps(img0 + 8); __m256 _r2 = _mm256_load_ps(img0 + 8 * 2); __m256 _r3 = _mm256_load_ps(img0 + 8 * 3); __m256 _tmp0 = _mm256_unpacklo_ps(_r0, _r1); __m256 _tmp1 = _mm256_unpackhi_ps(_r0, _r1); __m256 _tmp2 = _mm256_unpacklo_ps(_r2, _r3); __m256 _tmp3 = _mm256_unpackhi_ps(_r2, _r3); __m256 _tmp4 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmp5 = _mm256_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2)); __m256 _tmp6 = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0)); __m256 _tmp7 = _mm256_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2)); _r0 = _mm256_permute2f128_ps(_tmp4, _tmp5, _MM_SHUFFLE(0, 2, 0, 0)); _r1 = _mm256_permute2f128_ps(_tmp6, _tmp7, _MM_SHUFFLE(0, 2, 0, 0)); _r2 = _mm256_permute2f128_ps(_tmp4, _tmp5, _MM_SHUFFLE(0, 3, 0, 1)); _r3 = _mm256_permute2f128_ps(_tmp6, _tmp7, _MM_SHUFFLE(0, 3, 0, 1)); _mm256_store_ps(tmpptr, _r0); _mm256_store_ps(tmpptr + 8, _r1); _mm256_store_ps(tmpptr + 8 * 2, _r2); _mm256_store_ps(tmpptr + 8 * 3, _r3); img0 += size * 8; tmpptr += 32; } } } remain_size_start += nn_size << 2; nn_size = (size - remain_size_start) >> 1; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 2; float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); for (int q = 0; q < inch; q++) { const float* img0 = (const float*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { // transpose 8x2 __m256 _r0 = _mm256_load_ps(img0); __m256 _r1 = _mm256_load_ps(img0 + 8); __m256 _tmp0 = _mm256_unpacklo_ps(_r0, _r1); 
__m256 _tmp1 = _mm256_unpackhi_ps(_r0, _r1); _r0 = _mm256_permute2f128_ps(_tmp0, _tmp1, _MM_SHUFFLE(0, 2, 0, 0)); _r1 = _mm256_permute2f128_ps(_tmp0, _tmp1, _MM_SHUFFLE(0, 3, 0, 1)); _mm256_store_ps(tmpptr, _r0); _mm256_store_ps(tmpptr + 8, _r1); img0 += size * 8; tmpptr += 16; } } } remain_size_start += nn_size << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); for (int q = 0; q < inch; q++) { const float* img0 = (const float*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { __m256 _val = _mm256_load_ps(img0); _mm256_store_ps(tmpptr, _val); img0 += size * 8; tmpptr += 8; } } } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float* outptr0 = top_blob.channel(p); const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? bias + p * 8 : zeros; int i = 0; for (; i + 11 < size; i += 12) { const float* tmpptr = tmp.channel(i / 12); const float* kptr0 = kernel.channel(p); int nn = inch * maxk * 8; // inch always > 0 __m256 _sum0 = _mm256_loadu_ps(biasptr); __m256 _sum1 = _sum0; __m256 _sum2 = _sum0; __m256 _sum3 = _sum0; __m256 _sum4 = _sum0; __m256 _sum5 = _sum0; __m256 _sum6 = _sum0; __m256 _sum7 = _sum0; __m256 _sum8 = _sum0; __m256 _sum9 = _sum0; __m256 _suma = _sum0; __m256 _sumb = _sum0; for (int j = 0; j < nn; j++) { __m256 _w0 = _mm256_load_ps(kptr0); __m256 _val0 = _mm256_broadcast_ss(tmpptr); __m256 _val1 = _mm256_broadcast_ss(tmpptr + 1); _sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1); __m256 _val2 = _mm256_broadcast_ss(tmpptr + 2); __m256 _val3 = _mm256_broadcast_ss(tmpptr + 3); _sum2 = _mm256_comp_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm256_comp_fmadd_ps(_val3, _w0, _sum3); __m256 _val4 = _mm256_broadcast_ss(tmpptr + 4); __m256 _val5 = 
_mm256_broadcast_ss(tmpptr + 5); _sum4 = _mm256_comp_fmadd_ps(_val4, _w0, _sum4); _sum5 = _mm256_comp_fmadd_ps(_val5, _w0, _sum5); __m256 _val6 = _mm256_broadcast_ss(tmpptr + 6); __m256 _val7 = _mm256_broadcast_ss(tmpptr + 7); _sum6 = _mm256_comp_fmadd_ps(_val6, _w0, _sum6); _sum7 = _mm256_comp_fmadd_ps(_val7, _w0, _sum7); __m256 _val8 = _mm256_broadcast_ss(tmpptr + 8); __m256 _val9 = _mm256_broadcast_ss(tmpptr + 9); _sum8 = _mm256_comp_fmadd_ps(_val8, _w0, _sum8); _sum9 = _mm256_comp_fmadd_ps(_val9, _w0, _sum9); __m256 _vala = _mm256_broadcast_ss(tmpptr + 10); __m256 _valb = _mm256_broadcast_ss(tmpptr + 11); _suma = _mm256_comp_fmadd_ps(_vala, _w0, _suma); _sumb = _mm256_comp_fmadd_ps(_valb, _w0, _sumb); tmpptr += 12; kptr0 += 8; } _mm256_store_ps(outptr0, _sum0); _mm256_store_ps(outptr0 + 8, _sum1); _mm256_store_ps(outptr0 + 8 * 2, _sum2); _mm256_store_ps(outptr0 + 8 * 3, _sum3); _mm256_store_ps(outptr0 + 8 * 4, _sum4); _mm256_store_ps(outptr0 + 8 * 5, _sum5); _mm256_store_ps(outptr0 + 8 * 6, _sum6); _mm256_store_ps(outptr0 + 8 * 7, _sum7); _mm256_store_ps(outptr0 + 8 * 8, _sum8); _mm256_store_ps(outptr0 + 8 * 9, _sum9); _mm256_store_ps(outptr0 + 8 * 10, _suma); _mm256_store_ps(outptr0 + 8 * 11, _sumb); outptr0 += 8 * 12; } for (; i + 7 < size; i += 8) { const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); const float* kptr0 = kernel.channel(p); int nn = inch * maxk * 8; // inch always > 0 __m256 _sum0 = _mm256_loadu_ps(biasptr); __m256 _sum1 = _sum0; __m256 _sum2 = _sum0; __m256 _sum3 = _sum0; __m256 _sum4 = _sum0; __m256 _sum5 = _sum0; __m256 _sum6 = _sum0; __m256 _sum7 = _sum0; for (int j = 0; j < nn; j++) { __m256 _w0 = _mm256_load_ps(kptr0); __m256 _val0 = _mm256_broadcast_ss(tmpptr); __m256 _val1 = _mm256_broadcast_ss(tmpptr + 1); _sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1); __m256 _val2 = _mm256_broadcast_ss(tmpptr + 2); __m256 _val3 = _mm256_broadcast_ss(tmpptr + 3); _sum2 = 
_mm256_comp_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm256_comp_fmadd_ps(_val3, _w0, _sum3); __m256 _val4 = _mm256_broadcast_ss(tmpptr + 4); __m256 _val5 = _mm256_broadcast_ss(tmpptr + 5); _sum4 = _mm256_comp_fmadd_ps(_val4, _w0, _sum4); _sum5 = _mm256_comp_fmadd_ps(_val5, _w0, _sum5); __m256 _val6 = _mm256_broadcast_ss(tmpptr + 6); __m256 _val7 = _mm256_broadcast_ss(tmpptr + 7); _sum6 = _mm256_comp_fmadd_ps(_val6, _w0, _sum6); _sum7 = _mm256_comp_fmadd_ps(_val7, _w0, _sum7); tmpptr += 8; kptr0 += 8; } _mm256_store_ps(outptr0, _sum0); _mm256_store_ps(outptr0 + 8, _sum1); _mm256_store_ps(outptr0 + 8 * 2, _sum2); _mm256_store_ps(outptr0 + 8 * 3, _sum3); _mm256_store_ps(outptr0 + 8 * 4, _sum4); _mm256_store_ps(outptr0 + 8 * 5, _sum5); _mm256_store_ps(outptr0 + 8 * 6, _sum6); _mm256_store_ps(outptr0 + 8 * 7, _sum7); outptr0 += 8 * 8; } for (; i + 3 < size; i += 4) { const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* kptr0 = kernel.channel(p); int nn = inch * maxk * 8; // inch always > 0 __m256 _sum0 = _mm256_loadu_ps(biasptr); __m256 _sum1 = _sum0; __m256 _sum2 = _sum0; __m256 _sum3 = _sum0; for (int j = 0; j < nn; j++) { __m256 _w0 = _mm256_load_ps(kptr0); __m256 _val0 = _mm256_broadcast_ss(tmpptr); __m256 _val1 = _mm256_broadcast_ss(tmpptr + 1); _sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1); __m256 _val2 = _mm256_broadcast_ss(tmpptr + 2); __m256 _val3 = _mm256_broadcast_ss(tmpptr + 3); _sum2 = _mm256_comp_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm256_comp_fmadd_ps(_val3, _w0, _sum3); tmpptr += 4; kptr0 += 8; } _mm256_store_ps(outptr0, _sum0); _mm256_store_ps(outptr0 + 8, _sum1); _mm256_store_ps(outptr0 + 8 * 2, _sum2); _mm256_store_ps(outptr0 + 8 * 3, _sum3); outptr0 += 8 * 4; } for (; i + 1 < size; i += 2) { const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* kptr0 = kernel.channel(p); int nn = inch * maxk * 8; // inch always 
// > 0   (tail of the comment "// inch always > 0" split at the extraction
//        boundary; the code below finishes im2col_sgemm_pack8_avx: the 2-wide
//        and 1-wide column remainders of the 12/8/4/2/1 output tiling)
        __m256 _sum0 = _mm256_loadu_ps(biasptr);
        __m256 _sum1 = _sum0;

        // Accumulate over all input channels x kernel taps (each packed x8).
        for (int j = 0; j < nn; j++)
        {
            __m256 _w0 = _mm256_load_ps(kptr0);
            __m256 _val0 = _mm256_broadcast_ss(tmpptr);
            __m256 _val1 = _mm256_broadcast_ss(tmpptr + 1);
            _sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0);
            _sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1);

            tmpptr += 2;
            kptr0 += 8;
        }

        _mm256_store_ps(outptr0, _sum0);
        _mm256_store_ps(outptr0 + 8, _sum1);

        outptr0 += 8 * 2;
    }
    // Remainder: one output column at a time.
    for (; i < size; i++)
    {
        // Index math mirrors the tiled repacking above (12/8/4/2/1 tiles).
        const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
        const float* kptr0 = kernel.channel(p);

        int nn = inch * maxk * 8; // inch always > 0

        __m256 _sum = _mm256_loadu_ps(biasptr);

        for (int j = 0; j < nn; j++)
        {
            __m256 _w0 = _mm256_load_ps(kptr0);
            __m256 _val0 = _mm256_broadcast_ss(tmpptr);
            _sum = _mm256_comp_fmadd_ps(_val0, _w0, _sum);

            tmpptr += 1;
            kptr0 += 8;
        }

        _mm256_store_ps(outptr0, _sum);
        outptr0 += 8;
    }
}
}

// Repack the convolution weights for the pack8 AVX sgemm kernel above.
// Interleaves so the inner GEMM loop can stream 8 output channels per load:
//   src layout = maxk-inch-outch
//   dst layout = 8b-8a-maxk-inch/8a-outch/8b
static void convolution_im2col_sgemm_transform_kernel_pack8_avx(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;

    // interleave
    // src = maxk-inch-outch
    // dst = 8b-8a-maxk-inch/8a-outch/8b
    Mat kernel = _kernel.reshape(maxk, inch, outch);
    kernel_tm.create(64 * maxk, inch / 8, outch / 8, (size_t)4u);

    for (int q = 0; q + 7 < outch; q += 8)
    {
        float* g00 = kernel_tm.channel(q / 8);

        for (int p = 0; p + 7 < inch; p += 8)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int i = 0; i < 8; i++)
                {
                    for (int j = 0; j < 8; j++)
                    {
                        const float* k00 = kernel.channel(q + j).row(p + i);

                        g00[0] = k00[k];

                        g00++;
                    }
                }
            }
        }
    }
}

// pack8 convolution via im2col + sgemm: unfolds the input into a
// size x maxk x inch matrix (8 floats per element), then calls the tiled
// GEMM above. Assumes bottom_blob/top_blob are already elempack=8.
static void convolution_im2col_sgemm_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    const int size = outw * outh;

    const int maxk = kernel_w * kernel_h;

    // im2col
    Mat bottom_im2col(size, maxk, inch, 32u, 8, opt.workspace_allocator);
    {
        // gap skips from the end of one output row to the start of the next
        // in the (strided, x8-packed) input image.
        const int gap = (w * stride_h - outw * stride_w) * 8;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            float* ptr = bottom_im2col.channel(p);

            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    const float* sptr = img.row(dilation_h * u) + dilation_w * v * 8;

                    for (int i = 0; i < outh; i++)
                    {
                        int j = 0;
                        for (; j < outw; j++)
                        {
                            __m256 _v = _mm256_load_ps(sptr);
                            _mm256_store_ps(ptr, _v);

                            sptr += stride_w * 8;
                            ptr += 8;
                        }

                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_pack8_avx(bottom_im2col, top_blob, kernel, _bias, opt);
}
pentagon_cmap.h
#pragma omp parallel for schedule(dynamic,1) reduction(+:counter) for (vidType v0 = 0; v0 < g.V(); v0++) { auto tid = omp_get_thread_num(); auto &cmap = cmaps[tid]; for (auto v1 : g.N(v0)) { for (auto u : g.N(v1)) { if (u >= v0) break; cmap[u] = 1; } if (v1 < v0) { for (auto v2 : g.N(v0)) { if (v2 >= v1) break; for (auto v3 : g.N(v2)) { if (v3 >= v0) break; if (v3 == v1) continue; for (auto v4 : g.N(v3)) { if (v4 >= v0) break; if (v4 == v2) continue; if (cmap[v4] == 1) counter ++; } } } } for (auto u : g.N(v1)) { if (u >= v0) break; cmap[u] = 0; } } }
veccopy-ompt-target-map.c
#include <stdio.h>
#include <omp.h>
#include "callbacks.h"

// OMPT regression test: copies b into a on the device twice (once via
// `target parallel for`, once via `target teams distribute parallel for`),
// verifies the copy on the host, and relies on the FileCheck directives
// below to pin the expected sequence of OMPT Target/DataOp/Submit callbacks.
int main()
{
  int N = 100000;

  int a[N];
  int b[N];

  int i;

  for (i=0; i<N; i++)
    a[i]=0;

  for (i=0; i<N; i++)
    b[i]=i;

  // NOTE(review): a compound statement follows the combined `parallel for`
  // directive instead of the for statement itself; the OpenMP spec requires
  // the loop to immediately follow a worksharing-loop directive -- confirm
  // this compiles as intended with the toolchain under test.
  #pragma omp target parallel for
  {
    for (int j = 0; j< N; j++)
      a[j]=b[j];
  }

  #pragma omp target teams distribute parallel for
  {
    for (int j = 0; j< N; j++)
      a[j]=b[j];
  }

  int rc = 0;
  for (i=0; i<N; i++)
    if (a[i] != b[i] ) {
      rc++;
      printf ("Wrong value: a[%d]=%d\n", i, a[i]);
    }

  if (!rc)
    printf("Success\n");

  return rc;
}

// The /// CHECK lines are FileCheck directives: they are the test's expected
// callback trace (optype 1/2 = alloc/transfer-to-device, 3/4 =
// transfer-from-device/delete) and must not be reworded.
/// CHECK: 0: Could not register callback 'ompt_callback_target_map'
/// CHECK: Callback Init:
/// CHECK: Callback Load:
/// CHECK: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=1 endpoint=1
/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=1
/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=2
/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=1
/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=2
/// CHECK: Callback Submit: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] req_num_teams=1
/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=3
/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=3
/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=4
/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=4
/// CHECK: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=1 endpoint=2
/// CHECK: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=1 endpoint=1
/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=1
/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=2
/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=1
/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=2
/// CHECK: Callback Submit: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] req_num_teams=0
/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=3
/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=3
/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=4
/// CHECK: Callback DataOp: target_id=[[TARGET_ID:[0-9]+]] host_op_id=[[HOST_OP_ID:[0-9]+]] optype=4
/// CHECK: Callback Target: target_id=[[TARGET_ID:[0-9]+]] kind=1 endpoint=2
/// CHECK: Callback Fini:
sort.c
/**********************************************************************************************/ /* This program is part of the Barcelona OpenMP Tasks Suite */ /* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */ /* Copyright (C) 2009 Universitat Politecnica de Catalunya */ /* */ /* This program is free software; you can redistribute it and/or modify */ /* it under the terms of the GNU General Public License as published by */ /* the Free Software Foundation; either version 2 of the License, or */ /* (at your option) any later version. */ /* */ /* This program is distributed in the hope that it will be useful, */ /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ /* GNU General Public License for more details. */ /* */ /* You should have received a copy of the GNU General Public License */ /* along with this program; if not, write to the Free Software */ /* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /**********************************************************************************************/ /* * Original code from the Cilk project * * Copyright (c) 2000 Massachusetts Institute of Technology * Copyright (c) 2000 Matteo Frigo */ /* * this program uses an algorithm that we call `cilksort'. * The algorithm is essentially mergesort: * * cilksort(in[1..n]) = * spawn cilksort(in[1..n/2], tmp[1..n/2]) * spawn cilksort(in[n/2..n], tmp[n/2..n]) * sync * spawn cilkmerge(tmp[1..n/2], tmp[n/2..n], in[1..n]) * * * The procedure cilkmerge does the following: * * cilkmerge(A[1..n], B[1..m], C[1..(n+m)]) = * find the median of A \union B using binary * search. The binary search gives a pair * (ma, mb) such that ma + mb = (n + m)/2 * and all elements in A[1..ma] are smaller than * B[mb..m], and all the B[1..mb] are smaller * than all elements in A[ma..n]. 
* * spawn cilkmerge(A[1..ma], B[1..mb], C[1..(n+m)/2]) * spawn cilkmerge(A[ma..m], B[mb..n], C[(n+m)/2 .. (n+m)]) * sync * * The algorithm appears for the first time (AFAIK) in S. G. Akl and * N. Santoro, "Optimal Parallel Merging and Sorting Without Memory * Conflicts", IEEE Trans. Comp., Vol. C-36 No. 11, Nov. 1987 . The * paper does not express the algorithm using recursion, but the * idea of finding the median is there. * * For cilksort of n elements, T_1 = O(n log n) and * T_\infty = O(log^3 n). There is a way to shave a * log factor in the critical path (left as homework). */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "bots.h" #include "app-desc.h" ELM *array, *tmp; static unsigned long rand_nxt = 0; static inline unsigned long my_rand(void) { rand_nxt = rand_nxt * 1103515245 + 12345; return rand_nxt; } static inline void my_srand(unsigned long seed) { rand_nxt = seed; } static inline ELM med3(ELM a, ELM b, ELM c) { if (a < b) { if (b < c) { return b; } else { if (a < c) return c; else return a; } } else { if (b > c) { return b; } else { if (a > c) return c; else return a; } } } /* * simple approach for now; a better median-finding * may be preferable */ static inline ELM choose_pivot(ELM *low, ELM *high) { return med3(*low, *high, low[(high - low) / 2]); } static ELM *seqpart(ELM *low, ELM *high) { ELM pivot; ELM h, l; ELM *curr_low = low; ELM *curr_high = high; pivot = choose_pivot(low, high); while (1) { while ((h = *curr_high) > pivot) curr_high--; while ((l = *curr_low) < pivot) curr_low++; if (curr_low >= curr_high) break; *curr_high-- = l; *curr_low++ = h; } /* * I don't know if this is really necessary. * The problem is that the pivot is not always the * first element, and the partition may be trivial. * However, if the partition is trivial, then * *high is the largest element, whence the following * code. 
*/ if (curr_high < high) return curr_high; else return curr_high - 1; } #define swap(a, b) \ { \ ELM tmp;\ tmp = a;\ a = b;\ b = tmp;\ } static void insertion_sort(ELM *low, ELM *high) { ELM *p, *q; ELM a, b; for (q = low + 1; q <= high; ++q) { a = q[0]; for (p = q - 1; p >= low && (b = p[0]) > a; p--) p[1] = b; p[1] = a; } } /* * tail-recursive quicksort, almost unrecognizable :-) */ void seqquick(ELM *low, ELM *high) { ELM *p; while (high - low >= bots_app_cutoff_value_2) { p = seqpart(low, high); seqquick(low, p); low = p + 1; } insertion_sort(low, high); } void seqmerge(ELM *low1, ELM *high1, ELM *low2, ELM *high2, ELM *lowdest) { ELM a1, a2; /* * The following 'if' statement is not necessary * for the correctness of the algorithm, and is * in fact subsumed by the rest of the function. * However, it is a few percent faster. Here is why. * * The merging loop below has something like * if (a1 < a2) { * *dest++ = a1; * ++low1; * if (end of array) break; * a1 = *low1; * } * * Now, a1 is needed immediately in the next iteration * and there is no way to mask the latency of the load. * A better approach is to load a1 *before* the end-of-array * check; the problem is that we may be speculatively * loading an element out of range. While this is * probably not a problem in practice, yet I don't feel * comfortable with an incorrect algorithm. Therefore, * I use the 'fast' loop on the array (except for the last * element) and the 'slow' loop for the rest, saving both * performance and correctness. 
*/ if (low1 < high1 && low2 < high2) { a1 = *low1; a2 = *low2; for (;;) { if (a1 < a2) { *lowdest++ = a1; a1 = *++low1; if (low1 >= high1) break; } else { *lowdest++ = a2; a2 = *++low2; if (low2 >= high2) break; } } } if (low1 <= high1 && low2 <= high2) { a1 = *low1; a2 = *low2; for (;;) { if (a1 < a2) { *lowdest++ = a1; ++low1; if (low1 > high1) break; a1 = *low1; } else { *lowdest++ = a2; ++low2; if (low2 > high2) break; a2 = *low2; } } } if (low1 > high1) { memcpy(lowdest, low2, sizeof(ELM) * (high2 - low2 + 1)); } else { memcpy(lowdest, low1, sizeof(ELM) * (high1 - low1 + 1)); } } #define swap_indices(a, b) \ { \ ELM *tmp;\ tmp = a;\ a = b;\ b = tmp;\ } ELM *binsplit(ELM val, ELM *low, ELM *high) { /* * returns index which contains greatest element <= val. If val is * less than all elements, returns low-1 */ ELM *mid; while (low != high) { mid = low + ((high - low + 1) >> 1); if (val <= *mid) high = mid - 1; else low = mid; } if (*low > val) return low - 1; else return low; } void cilkmerge_par(ELM *low1, ELM *high1, ELM *low2, ELM *high2, ELM *lowdest) { /* * Cilkmerge: Merges range [low1, high1] with range [low2, high2] * into the range [lowdest, ...] */ ELM *split1, *split2; /* * where each of the ranges are broken for * recursive merge */ long int lowsize; /* * total size of lower halves of two * ranges - 2 */ /* * We want to take the middle element (indexed by split1) from the * larger of the two arrays. The following code assumes that split1 * is taken from range [low1, high1]. So if [low1, high1] is * actually the smaller range, we should swap it with [low2, high2] */ if (high2 - low2 > high1 - low1) { swap_indices(low1, low2); swap_indices(high1, high2); } if (high2 < low2) { /* smaller range is empty */ memcpy(lowdest, low1, sizeof(ELM) * (high1 - low1)); return; } if (high2 - low2 < bots_app_cutoff_value ) { seqmerge(low1, high1, low2, high2, lowdest); return; } /* * Basic approach: Find the middle element of one range (indexed by * split1). 
Find where this element would fit in the other range * (indexed by split 2). Then merge the two lower halves and the two * upper halves. */ split1 = ((high1 - low1 + 1) / 2) + low1; split2 = binsplit(*split1, low2, high2); lowsize = split1 - low1 + split2 - low2; /* * directly put the splitting element into * the appropriate location */ *(lowdest + lowsize + 1) = *split1; #pragma omp task untied cilkmerge_par(low1, split1 - 1, low2, split2, lowdest); #pragma omp task untied cilkmerge_par(split1 + 1, high1, split2 + 1, high2, lowdest + lowsize + 2); #pragma omp taskwait return; } void cilksort_par(ELM *low, ELM *tmp, long size) { /* * divide the input in four parts of the same size (A, B, C, D) * Then: * 1) recursively sort A, B, C, and D (in parallel) * 2) merge A and B into tmp1, and C and D into tmp2 (in parallel) * 3) merge tmp1 and tmp2 into the original array */ long quarter = size / 4; ELM *A, *B, *C, *D, *tmpA, *tmpB, *tmpC, *tmpD; if (size < bots_app_cutoff_value_1 ) { /* quicksort when less than 1024 elements */ seqquick(low, low + size - 1); return; } A = low; tmpA = tmp; B = A + quarter; tmpB = tmpA + quarter; C = B + quarter; tmpC = tmpB + quarter; D = C + quarter; tmpD = tmpC + quarter; #pragma omp task untied cilksort_par(A, tmpA, quarter); #pragma omp task untied cilksort_par(B, tmpB, quarter); #pragma omp task untied cilksort_par(C, tmpC, quarter); #pragma omp task untied cilksort_par(D, tmpD, size - 3 * quarter); #pragma omp taskwait #pragma omp task untied cilkmerge_par(A, A + quarter - 1, B, B + quarter - 1, tmpA); #pragma omp task untied cilkmerge_par(C, C + quarter - 1, D, low + size - 1, tmpC); #pragma omp taskwait cilkmerge_par(tmpA, tmpC - 1, tmpC, tmpA + size - 1, A); } void scramble_array( ELM *array ) { unsigned long i; unsigned long j; for (i = 0; i < bots_arg_size; ++i) { j = my_rand(); j = j % bots_arg_size; swap(array[i], array[j]); } } void fill_array( ELM *array ) { unsigned long i; my_srand(1); /* first, fill with integers 1..size 
*/ for (i = 0; i < bots_arg_size; ++i) { array[i] = i; } } void sort_init ( void ) { /* Checking arguments */ if (bots_arg_size < 4) { bots_message("%s can not be less than 4, using 4 as a parameter.\n", BOTS_APP_DESC_ARG_SIZE ); bots_arg_size = 4; } if (bots_app_cutoff_value < 2) { bots_message("%s can not be less than 2, using 2 as a parameter.\n", BOTS_APP_DESC_ARG_CUTOFF); bots_app_cutoff_value = 2; } else if (bots_app_cutoff_value > bots_arg_size ) { bots_message("%s can not be greather than vector size, using %d as a parameter.\n", BOTS_APP_DESC_ARG_CUTOFF, bots_arg_size); bots_app_cutoff_value = bots_arg_size; } if (bots_app_cutoff_value_1 > bots_arg_size ) { bots_message("%s can not be greather than vector size, using %d as a parameter.\n", BOTS_APP_DESC_ARG_CUTOFF_1, bots_arg_size); bots_app_cutoff_value_1 = bots_arg_size; } if (bots_app_cutoff_value_2 > bots_arg_size ) { bots_message("%s can not be greather than vector size, using %d as a parameter.\n", BOTS_APP_DESC_ARG_CUTOFF_2, bots_arg_size); bots_app_cutoff_value_2 = bots_arg_size; } if (bots_app_cutoff_value_2 > bots_app_cutoff_value_1) { bots_message("%s can not be greather than %s, using %d as a parameter.\n", BOTS_APP_DESC_ARG_CUTOFF_2, BOTS_APP_DESC_ARG_CUTOFF_1, bots_app_cutoff_value_1 ); bots_app_cutoff_value_2 = bots_app_cutoff_value_1; } array = (ELM *) malloc(bots_arg_size * sizeof(ELM)); tmp = (ELM *) malloc(bots_arg_size * sizeof(ELM)); fill_array(array); scramble_array(array); } void sort_par ( void ) { bots_message("Computing multisort algorithm (n=%d) ", bots_arg_size); #pragma omp parallel #pragma omp single nowait #pragma omp task untied cilksort_par(array, tmp, bots_arg_size); bots_message(" completed!\n"); } int sort_verify ( void ) { int i, success = 1; for (i = 0; i < bots_arg_size; ++i) if (array[i] != i) success = 0; return success ? BOTS_RESULT_SUCCESSFUL : BOTS_RESULT_UNSUCCESSFUL; }
mish_ref.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2020, OPEN AI LAB * Author: 942002795@qq.com */ #include <math.h> #include "sys_port.h" #include "module.h" #include "tengine_errno.h" #include "tengine_log.h" #include "tengine_ir.h" #include "../../cpu_node_ops.h" #include "tengine_op.h" int ref_mish_fp32(struct ir_tensor* input_tensor, struct ir_tensor* output_tensor, int num_thread) { int w = input_tensor->dims[3]; int h = output_tensor->dims[2]; int channels = input_tensor->dims[1]; int size = h * w; int c_step = h * w; float* input_data = input_tensor->data; float* out_data = output_tensor->data; #pragma omp parallel for num_threads(num_thread) for (int q = 0; q < channels; q++) { float* src = input_data + c_step * q; float* dst = out_data + c_step * q; for (int i = 0; i < size; i++) { dst[i] = src[i] * tanhf(log(1 + exp(src[i]))); } } return 0; } static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { return 0; } static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { return 0; } static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct ir_node* ir_node = 
exec_node->ir_node; struct ir_graph* ir_graph = ir_node->graph; struct ir_tensor* input_tensor; struct ir_tensor* output_tensor; input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); ref_mish_fp32(input_tensor, output_tensor, exec_graph->num_thread); return 0; } static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct ir_node* node = exec_node->ir_node; struct ir_graph* ir_graph = node->graph; struct ir_tensor* input = get_ir_graph_tensor(ir_graph, node->input_tensors[0]); struct ir_tensor* output = get_ir_graph_tensor(ir_graph, node->output_tensors[0]); int ret = set_ir_tensor_shape(output, input->dims, input->dim_num); return ret; } static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node) { return OPS_SCORE_CANDO; } static struct node_ops hcl_node_ops = {.prerun = NULL, .run = run, .reshape = reshape, .postrun = NULL, .init_node = init_node, .release_node = release_node, .score = score}; static int reg_mish_hcl_ops(void* arg) { return register_builtin_node_ops(OP_MISH, &hcl_node_ops); } static int unreg_mish_hcl_ops(void* arg) { return unregister_builtin_node_ops(OP_MISH, &hcl_node_ops); } AUTO_REGISTER_OPS(reg_mish_hcl_ops); AUTO_UNREGISTER_OPS(unreg_mish_hcl_ops);
GB_binop__isne_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isne_int32) // A.*B function (eWiseMult): GB (_AemultB_01__isne_int32) // A.*B function (eWiseMult): GB (_AemultB_02__isne_int32) // A.*B function (eWiseMult): GB (_AemultB_03__isne_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_int32) // A*D function (colscale): GB (_AxD__isne_int32) // D*A function (rowscale): GB (_DxB__isne_int32) // C+=B function (dense accum): GB (_Cdense_accumB__isne_int32) // C+=b function (dense accum): GB (_Cdense_accumb__isne_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_int32) // C=scalar+B GB (_bind1st__isne_int32) // C=scalar+B' GB (_bind1st_tran__isne_int32) // C=A+scalar GB (_bind2nd__isne_int32) // C=A'+scalar GB (_bind2nd_tran__isne_int32) // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (aij != bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are 
identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int32_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int32_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x != y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISNE || GxB_NO_INT32 || GxB_NO_ISNE_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__isne_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isne_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isne_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isne_int32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
// Generated SuiteSparse:GraphBLAS kernels for the binary operator
// z = (x != y) ("ISNE") on int32 inputs with int32 (0/1) result.
// Each function body is produced by textually including a shared template;
// the surrounding macros (GB_DISABLE, GB_FLIPPED, GB_ATYPE, GBB, GBX, ...)
// are defined by the generator before this point in the file.

// Tail of C=A*D (column scale): Cx is the int32 output array, the template
// performs the scaled copy. Returns GrB_NO_VALUE when this kernel is
// compiled out via GB_DISABLE.
*A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isne_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // output values; the row-scale template fills Cx [p] = D(i,i) != B(i,j)
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isne_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces for slicing M, A and B by entry; freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__isne_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isne_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__isne_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isne_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isne_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B; GBB(Bb,p) tests entry presence
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    // apply Cx [p] = (x != B(p)) to every present entry, in parallel
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int32_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x != bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isne_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A; GBB(Ab,p) tests entry presence
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    // apply Cx [p] = (A(p) != y) to every present entry, in parallel
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int32_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x != aij) ;                      \
}

GrB_Info GB (_bind1st_tran__isne_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any later template expansions
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij != y) ;                      \
}

GrB_Info GB (_bind2nd_tran__isne_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
MsnhCVVector.h
#ifndef MSNHCVVECTOR_H #define MSNHCVVECTOR_H #include <Msnhnet/config/MsnhnetCfg.h> #include <iostream> namespace Msnhnet { template<int N,typename T> class MsnhNet_API Vector { public: Vector(){} Vector(const std::vector<T> &val) { if(val.size()!=N) { throw Exception(1,"[Vector]: set val num must equal data num! \n", __FILE__, __LINE__, __FUNCTION__); } for (int i = 0; i < N; ++i) { this->_value[i] = val[i]; } } Vector(const Vector& vec) { memcpy(this->_value,vec._value,sizeof(T)*N); } Vector &operator= (const Vector &vec) { memcpy(this->_value,vec._value,sizeof(T)*N); return *this; } inline void fill(const T &value) { for (int i = 0; i < N; ++i) { this->_value[i] = value; } } inline void print() { std::cout<<"{ Vector: "<<N<<std::endl; if(isF32Vec()) { for (int i = 0; i < N; ++i) { std::cout<<std::setiosflags(std::ios::left)<<std::setprecision(6)<<std::setw(6)<<_value[i]<<" "; } } else if(isF64Vec()) { for (int i = 0; i < N; ++i) { std::cout<<std::setiosflags(std::ios::left)<<std::setprecision(12)<<std::setw(12)<<_value[i]<<" "; } } else { for (int i = 0; i < N; ++i) { std::cout<<_value[i]<<" "; } } std::cout<<"\n}"<<std::endl; } void setVal(const std::vector<T> &val) { if(val.size()!=N) { throw Exception(1,"[Vector]: set val num must equal data num! \n", __FILE__, __LINE__, __FUNCTION__); } for (int i = 0; i < N; ++i) { this->_value[i] = val[i]; } } void setVal(const int &index, const T &val) { if(index>(N-1)) { throw Exception(1,"[Vector]: index out of memory! 
\n", __FILE__, __LINE__, __FUNCTION__); } this->_value[index] = val; } bool isFuzzyNull() const { if(isF32Vec()) { for (int i = 0; i < N; ++i) { if(fabsf(this->_value[i])>MSNH_F32_EPS) { return false; } } return true; } else if(isF64Vec()) { for (int i = 0; i < N; ++i) { if(abs(this->_value[i])>MSNH_F64_EPS) { return false; } } return true; } else { for (int i = 0; i < N; ++i) { if(this->_value[i]>0) { return false; } } return true; } } inline bool isNan() const { for (int i = 0; i < N; ++i) { if(std::isnan(static_cast<double>(this->_value[i]))) { return true; } } return false; } inline bool isF32Vec() const { return std::is_same<T,float>::value; } inline bool isF64Vec() const { return std::is_same<T,double>::value; } Vector normalized() const { if(!(isF32Vec() || isF64Vec())) { throw Exception(1, "[Vector] normalize only f32 and f64 is supported!", __FILE__, __LINE__,__FUNCTION__); } T len = 0; Vector vec; for (int i = 0; i < N; ++i) { len += this->_value[i]*this->_value[i]; } if(isF32Vec()) { if(fabsf(len - 1.0f) < MSNH_F32_EPS) { return *this; } if(fabsf(len) < MSNH_F32_EPS) { return vec; } len = sqrtf(len); } else if(isF32Vec()) { if(abs(len - 1.0) < MSNH_F64_EPS) { return *this; } if(abs(len) < MSNH_F64_EPS) { return vec; } len = sqrt(len); } for (int i = 0; i < N; ++i) { vec[i] = this->_value[i] / len; } return vec; } void normalize() { if(!(isF32Vec() || isF64Vec())) { throw Exception(1, "[Vector] normalize only f32 and f64 is supported!", __FILE__, __LINE__,__FUNCTION__); } T len = 0; for (int i = 0; i < N; ++i) { len += this->_value[i]*this->_value[i]; } if(this->isF32Vec()) { if(fabsf(len - 1.0f) < MSNH_F32_EPS || fabsf(len) < MSNH_F32_EPS) { return; } len = sqrtf(len); } else { if(abs(len - 1.0) < MSNH_F64_EPS || abs(len) < MSNH_F64_EPS) { return; } len = sqrt(len); } for (int i = 0; i < N; ++i) { this->_value[i] = this->_value[i] / len; } } inline double length() const { double len = 0; for (int i = 0; i < N; ++i) { len += 
this->_value[i]*this->_value[i]; } return sqrt(len); } inline double lengthSquared() const { double len = 0; for (int i = 0; i < N; ++i) { len += this->_value[i]*this->_value[i]; } return len; } /* 点到点之间的距离 * .eg ^ * | * A x --> --> ---> * | \ OA - OB = |BA| * | \ * O |-----x--> * B */ inline double distanceToPoint(const Vector &point) const { return (*this - point).length(); } /* 点到线之间的距离 * .eg ^ * \ | * x x(A) * | \ * | x (point) * | \ * O |-------x--> B * \(direction) * \LINE(point + direction) */ inline double distanceToLine(const Vector &point, const Vector &direction) const { if(N<2) { throw Exception(1,"[Vector] only 2 dims+ is supported!",__FILE__,__LINE__,__FUNCTION__); } if(direction.isFuzzyNull()) { return (*this - point).length(); } Vector p = point + Vector::dotProduct((*this-point)*direction,direction); return (*this - p).length(); } /* 点到线之间的距离 * .eg ^ * / \ | *(normal) * / x * * / | \ * / | \ x(A) * \ *| \ * \ O |-------x--> B * * \ / / * * /\ / * / \ / (plane) * / \/ * */ inline double distanceToPlane(const Vector& plane, const Vector& normal) const { if(N<3) { throw Exception(1,"[Vector] only 3 dims+ is supported!",__FILE__,__LINE__,__FUNCTION__); } return (*this-plane)*normal; } inline static Vector crossProduct(const Vector &v1, const Vector &v2) { if(N!=3) { throw Exception(1,"[Vector] only 3 dims is supported!",__FILE__,__LINE__,__FUNCTION__); } return Vector({ v1[1]*v2[2] - v1[2]*v2[1], v1[2]*v2[0] - v1[0]*v2[2], v1[0]*v2[1] - v1[1]*v2[0]}); } inline static Vector normal(const Vector &v1, const Vector &v2) { return crossProduct(v1,v2).normalized(); } inline static Vector normal(const Vector &v1, const Vector &v2, const Vector &v3) { return crossProduct((v2-v1),(v3-v1)).normalized(); } inline static T dotProduct(const Vector &A, const Vector &B) { T finalVal = 0; for (int i = 0; i < N; ++i) { finalVal += A[i]*B[i]; } return finalVal; } inline T operator [](const int &index) const { if(index > (N-1)) { throw Exception(1,"[Vector]: index out of 
memory! \n", __FILE__, __LINE__, __FUNCTION__); } return _value[index]; } inline T &operator [](const int &index) { if(index > (N-1)) { throw Exception(1,"[Vector]: index out of memory! \n", __FILE__, __LINE__, __FUNCTION__); } return _value[index]; } inline friend Vector operator+ (const Vector &A, const Vector &B) { Vector tmp; for (int i = 0; i < N; ++i) { tmp[i] = A[i] + B[i]; } return tmp; } inline friend Vector operator+ (T A, const Vector &B) { Vector tmp; for (int i = 0; i < N; ++i) { tmp[i] = A + B[i]; } return tmp; } inline friend Vector operator+ (const Vector &A, T B) { Vector tmp; for (int i = 0; i < N; ++i) { tmp[i] = A[i] + B; } return tmp; } inline friend Vector operator- (const Vector &A, const Vector &B) { Vector tmp; for (int i = 0; i < N; ++i) { tmp[i] = A[i] - B[i]; } return tmp; } inline friend Vector operator- (T A, const Vector &B) { Vector tmp; for (int i = 0; i < N; ++i) { tmp[i] = A - B[i]; } return tmp; } inline friend Vector operator- (const Vector &A, T B) { Vector tmp; for (int i = 0; i < N; ++i) { tmp[i] = A[i] - B; } return tmp; } inline friend Vector operator- (const Vector &A) { Vector tmp; for (int i = 0; i < N; ++i) { tmp[i] = 0 - A[i]; } return tmp; } inline friend Vector operator* (const Vector &A, const Vector &B) { Vector tmp; for (int i = 0; i < N; ++i) { tmp[i] = A[i] * B[i]; } return tmp; } inline friend Vector operator* (T A, const Vector &B) { Vector tmp; for (int i = 0; i < N; ++i) { tmp[i] = A * B[i]; } return tmp; } inline friend Vector operator* (const Vector &A, T B) { Vector tmp; for (int i = 0; i < N; ++i) { tmp[i] = A[i] * B; } return tmp; } inline friend Vector operator/ (const Vector &A, T B) { Vector tmp; for (int i = 0; i < N; ++i) { tmp[i] = A[i] / B; } return tmp; } inline friend bool operator== (const Vector &A, const Vector &B) { if(A.isF32Vec()) { for (int i = 0; i < N; ++i) { if(fabsf(A[i] - B[i])>MSNH_F32_EPS) { return false; } } } else if(A.isF64Vec()) { for (int i = 0; i < N; ++i) { if(fabsf(A[i] - 
B[i])>MSNH_F64_EPS) { return false; } } } else { for (int i = 0; i < N; ++i) { if(A[i] != B[i]) { return false; } } } return true; } inline friend bool operator!= (const Vector &A, const Vector &B) { if(std::is_same<T,float>::value) { for (int i = 0; i < N; ++i) { if(fabsf(A[i] - B[i])>MSNH_F32_EPS) { return true; } } } else if(std::is_same<T,double>::value) { for (int i = 0; i < N; ++i) { if(fabsf(A[i] - B[i])>MSNH_F64_EPS) { return true; } } } else { for (int i = 0; i < N; ++i) { if(A[i] != B[i]) { return true; } } } return false; } inline Vector &operator +=(const Vector &A) { for (int i = 0; i < N; ++i) { this->_value[i]+=A[i]; } return *this; } inline Vector &operator +=(T A) { for (int i = 0; i < N; ++i) { this->_value[i]+=A; } return *this; } inline Vector &operator -=(const Vector &A) { for (int i = 0; i < N; ++i) { this->_value[i]-=A[i]; } return *this; } inline Vector &operator -=(T A) { for (int i = 0; i < N; ++i) { this->_value[i]-=A; } return *this; } inline Vector &operator *=(const Vector &A) { for (int i = 0; i < N; ++i) { this->_value[i]*=A[i]; } return *this; } inline Vector &operator *=(T A) { for (int i = 0; i < N; ++i) { this->_value[i]*=A; } return *this; } inline Vector &operator /=(T A) { #ifdef USE_OMP #pragma omp parallel for num_threads(OMP_THREAD) reduction(+:len) #endif for (int i = 0; i < N; ++i) { this->_value[i]/=A; } return *this; } private: T _value[N]; }; typedef Vector<3,double> EulerD; typedef Vector<3,double> TransformD; typedef Vector<3,double> RotationVecD; typedef Vector<2,double> Vector2D; typedef Vector<3,double> Vector3D; typedef Vector<4,double> Vector4D; typedef Vector<3,float> EulerF; typedef Vector<3,float> TransformF; typedef Vector<3,float> RotationVecF; typedef Vector<2,float> Vector2F; typedef Vector<3,float> Vector3F; typedef Vector<4,float> Vector4F; } #endif
ten_tusscher_3_EPI_RS_CPU.c
#include "model_common.h"
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>   // uint32_t
#include <math.h>     // powf/expf/logf/sqrtf
#include "ten_tusscher_3_RS.h"

// Epicardial variant of the reduced ten Tusscher 3 model.
#define EPI

// Report the model's resting potential and state-vector size to the solver.
GET_CELL_MODEL_DATA(init_cell_model_data) {

    assert(cell_model);

    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}

// Resting-state initial conditions for the 12 state variables.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {

    sv[0] = -86.2f;   // V; millivolt
    sv[1] = 0.0f;     // M
    sv[2] = 0.75;     // H
    sv[3] = 0.75;     // J
    sv[4] = 0.0f;     // Xr1
    sv[5] = 0.0f;     // Xs
    sv[6] = 1.0f;     // S
    sv[7] = 1.0f;     // F
    sv[8] = 1.0f;     // F2
    sv[9] = 0.0;      // D_INF
    sv[10] = 0.0;     // R_INF
    sv[11] = 0.0;     // Xr2_INF
}

// Advance all requested cells by num_steps steps of size dt.
// extra_data, when provided, is [6 model parameters][per-cell fibrosis array];
// when NULL, healthy-cell defaults are used and fibrosis is all-zero.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {

    uint32_t sv_id;
    real *fibrosis;

    // Default values for a healthy cell ///////////
    real atpi = 6.8f;
    real Ko = 5.4f;
    real Ki = 138.3f;
    real Vm_change = 0.0;
    real GNa_multiplicator = 1.0f;
    real GCa_multiplicator = 1.0f;
    ////////////////////////////////////

    int num_extra_parameters = 6;
    size_t extra_parameters_size = num_extra_parameters*sizeof(real);

    // set when this function allocates the default buffers itself and
    // therefore owns (and must free) them
    int defaults_allocated = 0;

    if(extra_data) {
        fibrosis = ((real*)extra_data) + num_extra_parameters; // pointer past the parameter block
    }
    else {
        extra_data = malloc(extra_parameters_size);
        ((real*)extra_data)[0] = atpi;
        ((real*)extra_data)[1] = Ko;
        ((real*)extra_data)[2] = Ki;
        ((real*)extra_data)[3] = Vm_change;
        ((real*)extra_data)[4] = GNa_multiplicator;
        ((real*)extra_data)[5] = GCa_multiplicator;
        fibrosis = calloc(num_cells_to_solve, sizeof(real));
        defaults_allocated = 1;
    }

    int i;

    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = i;

        for (int j = 0; j < num_steps; ++j) {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i], fibrosis[i], extra_data);
        }
    }

    // BUG FIX: the original guard was `if(extra_data == NULL) free(fibrosis);`
    // which is never true here (extra_data was reassigned above), so both the
    // default parameter block and the fibrosis array leaked on every call.
    if(defaults_allocated) {
        free(fibrosis);
        free(extra_data);
    }
}

// One Rush-Larsen step: explicit Euler for V (rDY[0] is dV/dt), while the
// gate slots of rDY already contain the fully-updated gate values.
void solve_model_ode_cpu(real dt, real *sv, real stim_current, real fibrosis, real *extra_parameters)  {

    assert(sv);

    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu(rY, rDY, stim_current, dt, fibrosis, extra_parameters);

    // THIS MODEL USES THE Rush Larsen METHOD TO SOLVE THE EDOS
    sv[0] = dt*rDY[0] + rY[0];
    sv[1]  = rDY[1];
    sv[2]  = rDY[2];
    sv[3]  = rDY[3];
    sv[4]  = rDY[4];
    sv[5]  = rDY[5];
    sv[6]  = rDY[6];
    sv[7]  = rDY[7];
    sv[8]  = rDY[8];
    sv[9]  = rDY[9];
    sv[10] = rDY[10];
    sv[11] = rDY[11];
}

// Right-hand side of the model. rDY_[0] receives -Itotal; gate slots receive
// their Rush-Larsen-updated values directly (see solve_model_ode_cpu).
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt, real fibrosis, real *extra_parameters) {

    // fibrosis = 0 means that the cell is fibrotic, 1 is not fibrotic.
    // Anything between 0 and 1 means border zone.
    const real svolt = sv[0];

    real atpi = extra_parameters[0];
    real Ko = extra_parameters[1];
    real Ki = extra_parameters[2];
    real Vm_modifier = extra_parameters[3];
    real GNa_multiplicator = extra_parameters[4];
    real GCa_multiplicator = extra_parameters[5];

    Vm_modifier = Vm_modifier - Vm_modifier*fibrosis;

    // These values are from "Electrophysiologic effects of acute myocardial
    // ischemia: a theoretical study of altered cell excitability and action
    // potential duration".
    real svolt_acid = svolt - Vm_modifier;

    const real sh      = sv[2];
    const real sm      = sv[1];
    const real sj      = sv[3];
    const real sxr1    = sv[4];
    const real sxs     = sv[5];
    const real ss      = sv[6];
    const real sf      = sv[7];
    const real sf2     = sv[8];
    const real D_INF   = sv[9];
    // NOTE(review): sv[10]/sv[11] are read here as Xr2_INF/R_INF, but the
    // initial conditions and the rDY_[10]/rDY_[11] writes below label those
    // slots R_INF/Xr2_INF — i.e. reads and writes appear swapped. Left
    // untouched to preserve the published behavior; confirm against the
    // reference implementation.
    const real Xr2_INF = sv[10];
    const real R_INF   = sv[11];

    const real natp = 0.24;          // K dependence of ATP-sensitive K current
    const real nicholsarea = 0.00005; // Nichol's areas (cm^2)
    const real hatp = 2;             // Hill coefficient

    // Extracellular potassium concentration was elevated
    // from its default value of 5.4 mM to values between 6.0 and 8.0 mM
    // Ref: A Comparison of Two Models of Human Ventricular Tissue:
    // Simulated Ischemia and Re-entry
    real Ko_change = 5.4f - Ko;
    Ko = Ko + Ko_change*fibrosis;

    // Linear changing of atpi depending on the fibrosis and distance from the
    // center of the scar (only for border zone cells)
    real atpi_change = 6.8f - atpi;
    atpi = atpi + atpi_change*fibrosis;

    // real katp = 0.306;
    // Ref: A Comparison of Two Models of Human Ventricular Tissue:
    // Simulated Ischaemia and Re-entry
    const real katp = -0.0942857142857f*atpi + 0.683142857143f;

    const real patp =  1.0f/(1.0f + powf((atpi/katp),hatp));
    const real gkatp    =  0.000195f/nicholsarea;
    const real gkbaratp =  gkatp*patp*powf((Ko/4),natp);

    const real katp2= 1.4;
    const real hatp2 = 2.6;
    const real pcal = 1.0f/(1.0f + powf((katp2/atpi),hatp2));

    const real Cao=2.0;
    const real Nao=140.0;
    const real Cai=0.00007;
    const real Nai=7.67;

    // This parameter changes with acidosis; changes are based on data from
    // rat and guinea pig.
    real Ki_change = 138.3f - Ki;
    Ki = Ki + Ki_change*fibrosis;

    real GNa_multiplicator_change = 1.0f - GNa_multiplicator;
    GNa_multiplicator = GNa_multiplicator + GNa_multiplicator_change*fibrosis;

    real GCa_multiplicator_change = 1.0f - GCa_multiplicator;
    GCa_multiplicator = GCa_multiplicator + GCa_multiplicator_change*fibrosis;

    // Constants
    const real R = 8314.472;
    const real F = 96485.3415f;
    const real T = 310.0;
    const real RTONF = (R*T)/F;

    // Parameters for IKr
    const real Gkr=0.101;
    // Parameters for Iks
    const real pKNa=0.03;
#ifdef EPI
    const real Gks=0.257;
#endif
#ifdef ENDO
    const real Gks=0.392;
#endif
#ifdef MCELL
    const real Gks=0.098;
#endif
    // Parameters for Ik1
    const real GK1=5.405;
    // Parameters for Ito
#ifdef EPI
    const real Gto=0.294;
#endif
#ifdef ENDO
    const real Gto=0.073;
#endif
#ifdef MCELL
    const real Gto=0.294;
#endif
    // Parameters for INa (acidosis scales it via GNa_multiplicator)
    real GNa=14.838;
    GNa = GNa*GNa_multiplicator;
    // Parameters for IbNa
    const real GbNa=0.00029;
    // Parameters for INaK
    const real KmK=1.0;
    const real KmNa=40.0;
    const real knak=2.724;
    // Parameters for ICaL (acidosis scales it via GCa_multiplicator)
    real GCaL=0.2786f*pcal;
    GCaL = GCaL*GCa_multiplicator;
    // Parameters for IbCa
    const real GbCa=0.000592;
    // Parameters for INaCa
    const real knaca=1000;
    const real KmNai=87.5;
    const real KmCa=1.38;
    const real ksat=0.1;
    const real n=0.35;
    // Parameters for IpCa
    const real GpCa=0.1238;
    const real KpCa=0.0005;
    // Parameters for IpK
    const real GpK=0.0293;

    // Reversal potentials
    const real Ek=RTONF*(logf((Ko/Ki)));
    const real Ena=RTONF*(logf((Nao/Nai)));
    const real Eks=RTONF*(logf((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    const real Eca=0.5f*RTONF*(logf((Cao/Cai)));

    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real IKatp;

    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real Xr1_INF;
    real Xr2_INF_new;
    real TAU_Xr1;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF_new;
    real S_INF;
    real TAU_S;
    real Af;
    real Bf;
    real Cf;
    real Af2;
    real Bf2;
    real Cf2;
    real D_INF_new;
    real TAU_F;
    real F_INF;
    real TAU_F2;
    real F2_INF;
    real sItot;

    // Needed to compute currents
    Ak1=0.1f/(1.0f+expf(0.06f*(svolt-Ek-200.0f)));
    Bk1=(3.0f*expf(0.0002f*(svolt-Ek+100.0f))+
         expf(0.1f*(svolt-Ek-10.0f)))/(1.0f+expf(-0.5f*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1.0f/(1.0f+0.1245f*expf(-0.1f*svolt_acid*F/(R*T))+0.0353f*expf(-svolt_acid*F/(R*T))));
    rec_ipK=1.0f/(1.0f+expf((25.0f-svolt)/5.98f));

    // According to "Electrophysiologic effects of acute myocardial ischemia",
    // Vm_acid = Vm - 3.4 for all sodium current computation.
    // Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt_acid-Ena);
    ICaL=GCaL*D_INF*sf*sf2*(svolt-60);
    Ito=Gto*R_INF*ss*(svolt-Ek);
    IKr=Gkr*sqrtf(Ko/5.4f)*sxr1*Xr2_INF*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1.0f/(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1.0f/(KmCa+Cao))*
          (1.0f/(1.0f+ksat*expf((n-1.0f)*svolt_acid*F/(R*T))))*
          (expf(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           expf((n-1.0f)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5f);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt_acid-Ena);
    IbCa=GbCa*(svolt-Eca);
    IKatp = gkbaratp*(svolt-Ek);

    // Determine total current
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              IKatp +
              stim_current;

    // compute steady state values and time constants
    AM=1.0f/(1.0f+expf((-60.0f-svolt)/5.0f));
    BM=0.1f/(1.0f+expf((svolt+35.0f)/5.0f))+0.10f/(1.0f+expf((svolt-50.0f)/200.0f));
    TAU_M=AM*BM;
    M_INF=1.0f/((1.0f+expf((-56.86f-svolt)/9.03f))*(1.0f+expf((-56.86f-svolt)/9.03f)));
    if (svolt>=-40.)
    {
        AH_1=0.0f;
        BH_1=(0.77f/(0.13f*(1.0f+expf(-(svolt+10.66f)/11.1f))));
        TAU_H= 1.0f/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057f*expf(-(svolt+80.0f)/6.8f));
        BH_2=(2.7f*expf(0.079f*svolt)+(3.1e5f)*expf(0.3485f*svolt));
        TAU_H=1.0f/(AH_2+BH_2);
    }
    H_INF=1.0f/((1.0f+expf((svolt+71.55f)/7.43f))*(1.0f+expf((svolt+71.55f)/7.43f)));
    if(svolt>=-40.0f)
    {
        AJ_1=0.0f;
        BJ_1=(0.6f*expf((0.057f)*svolt)/(1.0f+expf(-0.1f*(svolt+32.0f))));
        TAU_J= 1.0f/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4f)*expf(0.2444f*svolt)-(6.948e-6f)*expf(-0.04391f*svolt))*(svolt+37.78f)/
              (1.0f+expf(0.311f*(svolt+79.23f))));
        BJ_2=(0.02424f*expf(-0.01052f*svolt)/(1.0f+expf(-0.1378f*(svolt+40.14f))));
        TAU_J= 1.0f/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1.0f/(1.0f+expf((-26.0f-svolt)/7.0f));
    axr1=450.0f/(1.0f+expf((-45.0f-svolt)/10.0f));
    bxr1=6.0f/(1.0f+expf((svolt-(-30.0f))/11.5f));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF_new=1.0f/(1.0f+expf((svolt-(-88.0f))/24.0f));

    Xs_INF=1.0f/(1.0f+expf((-5.0f-svolt)/14.0f));
    Axs=(1400.0f/(sqrtf(1.0f+expf((5.0f-svolt)/6.0f))));
    Bxs=(1.0f/(1.0f+expf((svolt-35.0f)/15.0f)));
    TAU_Xs=Axs*Bxs+80;

#ifdef EPI
    R_INF_new=1./(1.+expf((20-svolt)/6.));
    S_INF=1./(1.+expf((svolt+20)/5.));
    TAU_S=85.*expf(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+expf((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
    R_INF_new=1.0f/(1.0f+expf((20.0f-svolt)/6.0f));
    S_INF=1.0f/(1.0f+expf((svolt+28.0f)/5.0f));
    TAU_S=1000.0f*expf(-(svolt+67.0f)*(svolt+67.0f)/1000.0f)+8.0f;
#endif
#ifdef MCELL
    R_INF_new=1./(1.+expf((20-svolt)/6.));
    S_INF=1./(1.+expf((svolt+20)/5.));
    TAU_S=85.*expf(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+expf((svolt-20.)/5.))+3.;
#endif

    D_INF_new=1.0f/(1.0f+expf((-8.0f-svolt)/7.5f));
    F_INF=1.0f/(1.0f+expf((svolt+20)/7));
    Af=1102.5f*expf(-(svolt+27)*(svolt+27.0f)/225.0f);
    Bf=200.0f/(1.0f+expf((13.0f-svolt)/10.f));
    Cf=(180.0f/(1.0f+expf((svolt+30.0f)/10.0f)))+20.0f;
    TAU_F=Af+Bf+Cf;
    F2_INF=0.67f/(1.0f+expf((svolt+35.0f)/7.0f))+0.33f;
    Af2=600.0f*expf(-(svolt+27.0f)*(svolt+27.0f)/170.0f);
    Bf2=7.75f/(1.0f+expf((25.0f-svolt)/10.0f));
    Cf2=16.0f/(1.0f+expf((svolt+30.0f)/10.0f));
    TAU_F2=Af2+Bf2+Cf2;

    // update voltage: rDY_[0] is dV/dt, integrated by the caller
    rDY_[0] = -sItot;

    // update gates: Rush-Larsen exponential integration, already the new values
    rDY_[1] = M_INF-(M_INF-sm)*expf(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*expf(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*expf(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*expf(-dt/TAU_Xr1);
    rDY_[5] = Xs_INF-(Xs_INF-sxs)*expf(-dt/TAU_Xs);
    rDY_[6] = S_INF-(S_INF-ss)*expf(-dt/TAU_S);
    rDY_[7] = F_INF-(F_INF-sf)*expf(-dt/TAU_F);
    rDY_[8] = F2_INF-(F2_INF-sf2)*expf(-dt/TAU_F2);

    // steady-state (algebraic) variables
    rDY_[9] = D_INF_new;
    rDY_[10] = R_INF_new;
    rDY_[11] = Xr2_INF_new;
}
uni_plymc_write.h
/* * plymc.h * filter_plymc * * Created by Paolo Cignoni on 10/23/09. * Copyright 2009 ISTI - CNR. All rights reserved. * */ #ifndef __PLYMC_H__ #define __PLYMC_H__ #ifndef WIN32 #define _int64 long long #define __int64 long long #define __cdecl #endif #include <cstdio> #include <time.h> #include <float.h> #include <math.h> #include <locale> #include <iostream> //#include <tchar.h> #include <list> #include <limits> #include <vcg/space/index/grid_static_ptr.h> #include <vcg/simplex/vertex/base.h> #include <vcg/simplex/face/base.h> #include <vcg/complex/used_types.h> #include <vcg/complex/complex.h> #include <vcg/complex/algorithms/update/position.h> #include <vcg/complex/algorithms/update/normal.h> #include <vcg/complex/algorithms/update/quality.h> #include <vcg/complex/algorithms/update/edges.h> #include <vcg/complex/algorithms/update/topology.h> #include <vcg/complex/algorithms/update/flag.h> #include <vcg/complex/algorithms/update/bounding.h> #include <vcg/math/histogram.h> #include <vcg/complex/algorithms/clean.h> #include <wrap/io_trimesh/import.h> #include <wrap/io_trimesh/export_ply.h> #include <wrap/ply/plystuff.h> #include <vcg/complex/algorithms/create/marching_cubes.h> #include <vcg/complex/algorithms/create/extended_marching_cubes.h> #include "trivial_walker.h" // local optimization #include <vcg/complex/algorithms/local_optimization.h> #include <vcg/complex/algorithms/local_optimization/tri_edge_collapse.h> #include <vcg/complex/algorithms/local_optimization/tri_edge_collapse_quadric.h> #include <vcg/simplex/edge/base.h> #include <stdarg.h> #include "volume.h" #include "tri_edge_collapse_mc.h" #include <osg/Timer> typedef bool CallBackPosTotal(const int pos, const int total,unsigned long long tick,const char * str ); namespace vcg { namespace tri { // Simple prototype for later use... 
// Forward declaration; the full definition is at the bottom of this file.
// Simplifies a marching-cubes output mesh down to roughly the given error.
template<class MeshType>
void MCSimplify( MeshType &m, float perc, bool preserveBB=true, vcg::CallBackPos *cb=0);

/// Out-of-core merging of a set of range maps into a single watertight mesh:
/// each input mesh is rasterized into a signed volumetric grid (possibly split
/// into sub-volumes), the grids are merged/normalized, and the final surface is
/// extracted with marching cubes.
///  - SMesh        : the input triangle-mesh/point-cloud type
///  - MeshProvider : supplies the input meshes, their transforms and weights
template < class SMesh, class MeshProvider>
class PlyMC
{
public:
  class MCVertex;
  class MCEdge;
  class MCFace;

  // VCG component-trait plumbing for the marching-cubes output mesh.
  class MCUsedTypes: public vcg::UsedTypes < vcg::Use<MCVertex>::template AsVertexType,
                                             vcg::Use<MCEdge >::template AsEdgeType,
                                             vcg::Use<MCFace >::template AsFaceType >{};

  class MCVertex : public Vertex< MCUsedTypes, vertex::Coord3f, vertex::Color4b, vertex::Mark, vertex::VFAdj, vertex::BitFlags, vertex::Qualityf>{};

  class MCEdge : public Edge<MCUsedTypes,edge::VertexRef> {
  public:
    inline MCEdge() {};
    inline MCEdge( MCVertex * v0, MCVertex * v1){this->V(0) = v0; this->V(1) = v1; };
    // Returns the edge with its two endpoints in pointer order, so that the
    // same undirected edge always compares equal.
    static inline MCEdge OrderedEdge(MCVertex* v0,MCVertex* v1){
      if(v0<v1) return MCEdge(v0,v1);
      else return MCEdge(v1,v0);
    }
  };

  class MCFace : public Face< MCUsedTypes, face::InfoOcf, face::VertexRef, face::FFAdjOcf, face::VFAdjOcf, face::BitFlags> {};
  class MCMesh : public vcg::tri::TriMesh< std::vector< MCVertex>, face::vector_ocf< MCFace > > {};

  //******************************************
  //typedef Voxel<float> Voxelf;
  typedef Voxelfc Voxelf;
  //******************************************

  /// All user-tunable knobs of the plymc pipeline, with their defaults.
  class Parameter
  {
  public:
    Parameter()
    {
      NCell=10000;
      WideNum= 3;
      WideSize=0;
      VoxSize=0;
      IPosS=Point3i(0,0,0); // SubVolume Start
      IPosE=Point3i(0,0,0); // SubVolume End
      IPosB=Point3i(0,0,0); // SubVolume to restart from in lexicographic order (useful for crashes)
      //IPos=Point3i(0,0,0);
      IDiv=Point3i(1,1,1);
      VerboseLevel=0;
      SliceNum=1;
      FillThr=12;
      ExpAngleDeg=30;
      SmoothNum=1;
      RefillNum=1;
      IntraSmoothFlag = false;
      QualitySmoothAbs = 0.0f; // 0 means un-setted value.
      QualitySmoothVox = 3.0f; // expressed in voxel
      OffsetFlag=false;
      OffsetThr=-3;
      GeodesicQualityFlag=true;
      PLYFileQualityFlag=false;
      SaveVolumeFlag=false;
      SafeBorder=1;
      CleaningFlag=false;
      SimplificationFlag=false;
      VertSplatFlag=false;
      MergeColor=false;
      basename = "plymcout";
    }

    int NCell;               // requested volume size in kilo-cells (0 => derive from VoxSize)
    int WideNum;             // number of expand/refill passes per mesh
    float WideSize;          // if >0, WideNum is derived from this distance
    float VoxSize;           // requested voxel edge length (used when NCell==0)
    Point3i IPosS; // SubVolume Start
    Point3i IPosE; // SubVolume End
    Point3i IPosB; // SubVolume to restart from in lexicographic order (useful for crashes)
    //Point3i IPos;
    Point3i IDiv;            // subdivision of the whole volume into subvolumes
    int VerboseLevel;        // >0 dumps debug PPM slices
    int SliceNum;
    int FillThr;
    float ExpAngleDeg;
    int SmoothNum;
    int RefillNum;
    bool IntraSmoothFlag;
    float QualitySmoothAbs; // 0 means un-setted value.
    float QualitySmoothVox; // expressed in voxel
    bool OffsetFlag;
    float OffsetThr;
    bool GeodesicQualityFlag;
    bool PLYFileQualityFlag;
    bool SaveVolumeFlag;
    int SafeBorder;
    bool CleaningFlag;
    bool SimplificationFlag;
    bool VertSplatFlag;
    bool MergeColor;
    std::string basename;                  // prefix for every produced file
    std::vector<std::string> OutNameVec;      // filenames of the saved meshes
    std::vector<std::string> OutNameSimpVec;  // filenames of the simplified meshes
  }; //end Parameter class

  /// PLYMC Data
  MeshProvider MP;
  Parameter p;
  // std::vector< std::vector<std::vector<Volume<Voxelf> > > >vVV;

  /// PLYMC Methods

  // Loads mesh `filename`, prepares its per-vertex quality/normals, applies
  // transform Tr and converts its coordinates into the integer space of
  // volume VV. Returns false on load failure or bad point-cloud normals.
  bool InitMesh(Volume<Voxelf> &VV,SMesh &m, const char *filename, Matrix44f Tr)
  {
    typename SMesh::VertexIterator vi;
    int loadmask;
    int ret = tri::io::Importer<SMesh>::Open(m,filename,loadmask);
    tri::Clean<SMesh>::FlipMesh(m);
    // NOTE(review): FlipMesh is invoked before the error check, i.e. even when
    // the import failed — presumably harmless on an empty mesh, but confirm.
    if(ret)
    {
      printf("Error: unabe to open mesh '%s'",filename);
      return false;
    }

    if(p.VertSplatFlag)
    {
      // Point-cloud path: splatting requires unit-length per-vertex normals.
      if(!(loadmask & tri::io::Mask::IOM_VERTNORMAL))
      {
        printf("Error, pointset MUST have normals");
        exit(-1);
      }
      else printf("Ok Pointset has normals\n");
      for(vi=m.vert.begin(); vi!=m.vert.end();++vi)
        if(math::Abs(SquaredNorm((*vi).N())-1.0)>0.0001)
        {
          printf("Error: mesh has not per vertex normalized normals\n");
          return false;
        }
      if(!(loadmask & tri::io::Mask::IOM_VERTQUALITY))
        tri::UpdateQuality<SMesh>::VertexConstant(m,0);
      tri::UpdateNormals<SMesh>::PerVertexMatrix(m,Tr);
      //if(!(loadmask & tri::io::Mask::IOM_VERTCOLOR))
      //  saveMask &= ~tri::io::Mask::IOM_VERTCOLOR;
    }
    else // processing for triangle meshes
    {
      if(p.CleaningFlag){
        int dup = tri::Clean<SMesh>::RemoveDuplicateVertex(m);
        int unref = tri::Clean<SMesh>::RemoveUnreferencedVertex(m);
        printf("Removed %i duplicates and %i unref",dup,unref);
      }

      tri::UpdateNormals<SMesh>::PerVertexNormalizedPerFaceNormalized(m);
      if(p.GeodesicQualityFlag) {
        // Per-vertex quality = geodesic distance from the mesh border.
        tri::UpdateTopology<SMesh>::VertexFace(m);
        tri::UpdateFlags<SMesh>::FaceBorderFromVF(m);
        tri::UpdateQuality<SMesh>::VertexGeodesicFromBorder(m);
      }
    }

    tri::UpdatePosition<SMesh>::Matrix(m,Tr,false);
    tri::UpdateBounding<SMesh>::Box(m);
    //printf("Init Mesh %s (%ivn,%ifn)\n",filename,m.vn,m.fn);

    // Convert vertex coordinates into the volume's integer grid space.
    for(vi=m.vert.begin(); vi!=m.vert.end();++vi)
      VV.Interize((*vi).P());

    return true;
  }

  // This function add a mesh (or a point cloud to the volume)
  // the point cloud MUST have normalized vertex normals.
  // Rasterizes `m` into a scratch volume B (face scanning or vertex
  // splatting), expands/refills/smooths it, then merges B into VV.
  // Returns false if the mesh does not touch VV's sub-box or nothing was
  // rasterized. `w` is a per-mesh weight applied to the quality field.
  bool AddMeshToVolumeM(Volume<Voxelf> &VV,SMesh &m, std::string meshname, const double w )
  {
    typename SMesh::VertexIterator vi;
    typename SMesh::FaceIterator fi;
    if(!m.bbox.Collide(VV.SubBoxSafe)) return false;
    size_t found =meshname.find_last_of("/\\");
    std::string shortname = meshname.substr(found+1);

    Volume <Voxelf> B;
    B.Init(VV);

    bool res=false;
    double quality=0;
    // Now add the mesh to the volume
    if(!p.VertSplatFlag)
    {
      float minq=std::numeric_limits<float>::max(), maxq=-std::numeric_limits<float>::max();
      // Compute the geodesic quality range PER FACE as the average of the per-vertex values.
      for(fi=m.face.begin(); fi!=m.face.end();++fi){
        (*fi).Q()=((*fi).V(0)->Q()+(*fi).V(1)->Q()+(*fi).V(2)->Q())/3.0f;
        minq=std::min((*fi).Q(),minq);
        maxq=std::max((*fi).Q(),maxq);
      }
      // The quality is initially expressed as absolute distance from the mesh border.
      //printf("Q [%4.2f %4.2f] \n",minq,maxq);
      bool closed=false;
      // If the mesh is closed, ComputeGeodesicQuality leaves the quality zero everywhere.
      if(minq==maxq) closed=true;

      // Classical approach: scan each face
      int tt0=clock();
      //printf("---- Face Rasterization");
      for(fi=m.face.begin(); fi!=m.face.end();++fi)
      {
        if(closed || (p.PLYFileQualityFlag==false && p.GeodesicQualityFlag==false)) quality=1.0;
        else quality=w*(*fi).Q();
        if(quality)
          res |= B.ScanFace((*fi).V(0)->P(),(*fi).V(1)->P(),(*fi).V(2)->P(),quality,(*fi).N());
      }
      //printf("herer\n");
      // printf(" : %li\n",clock()-tt0);
    }
    else
    { // Splat approach add only the vertices to the volume
      printf("Vertex Splatting\n");
      for(vi=m.vert.begin();vi!=m.vert.end();++vi)
      {
        if(p.PLYFileQualityFlag==false) quality=1.0;
        else quality=w*(*vi).Q();
        if(quality)
          res |= B.SplatVert((*vi).P(),quality,(*vi).N(),(*vi).C());
      }
    }
    if(!res) return false;

    // Optional debug dumps, then WideNum rounds of expand+refill
    // (optionally smoothing inside the loop), a final smoothing pass,
    // and the merge of the scratch volume into the accumulator VV.
    int vstp=0;
    if(p.VerboseLevel>0)
    {
      B.SlicedPPM(shortname.c_str(),std::string(SFormat("%02i",vstp)).c_str(),p.SliceNum );
      B.SlicedPPMQ(shortname.c_str(),std::string(SFormat("%02i",vstp)).c_str(),p.SliceNum );
      vstp++;
    }
    for(int i=0;i<p.WideNum;++i)
    {
      B.Expand(math::ToRad(p.ExpAngleDeg));
      if(p.VerboseLevel>1) B.SlicedPPM(shortname.c_str(),SFormat("%02ie",vstp++),p.SliceNum );
      B.Refill(p.FillThr);
      if(p.VerboseLevel>1) B.SlicedPPM(shortname.c_str(),SFormat("%02if",vstp++),p.SliceNum );
      if(p.IntraSmoothFlag)
      {
        Volume <Voxelf> SM;
        SM.Init(VV);
        SM.CopySmooth(B,1,p.QualitySmoothAbs);
        B=SM;
        if(p.VerboseLevel>1) B.SlicedPPM(shortname.c_str(),SFormat("%02is",vstp++),p.SliceNum );
        // if(VerboseLevel>1) B.SlicedPPMQ(shortname,SFormat("%02is",vstp),SliceNum );
      }
    }
    if(p.SmoothNum>0)
    {
      Volume <Voxelf> SM;
      SM.Init(VV);
      SM.CopySmooth(B,1,p.QualitySmoothAbs);
      B=SM;
      if(p.VerboseLevel>1) B.SlicedPPM(shortname.c_str(),SFormat("%02isf",vstp++),p.SliceNum );
    }
    VV.Merge(B);
    if(p.VerboseLevel>0) VV.SlicedPPMQ(std::string("merge_").c_str(),shortname.c_str(),p.SliceNum );
    return true;
  }

  // Builds the "_xyz" suffix identifying subvolume `pos` of a `div` split;
  // the number of digits per axis grows with the subdivision count.
  void GetSubVolumeTag(Point3i div,Point3i pos,std::string &subtag)
  {
    char buf[32];
    if (div[0]<= 10 && div[1]<= 10 && div[2]<= 10 ) sprintf(buf,"_%01d%01d%01d",pos[0],pos[1],pos[2]);
    else if(div[0]<= 100 && div[1]<= 100 && div[2]<= 100 ) sprintf(buf,"_%02d%02d%02d",pos[0],pos[1],pos[2]);
    else sprintf(buf,"_%03d%03d%03d",pos[0],pos[1],pos[2]);
    subtag=buf;
  }

  // Phase 1 of the pipeline: for every subvolume in [IPosS,IPosE], rasterize
  // all colliding input meshes into it, post-process (offset/refill/smooth)
  // and write the subvolume to a "test_xyz.raw" file.
  // NOTE(review): `cb` is dereferenced unconditionally despite its =0 default —
  // callers must pass a valid callback.
  void ProcessCells(CallBackPosTotal *cb=0)
  {
    unsigned long long startTick=osg::Timer::instance()->tick();
    printf("bbox scanning...\n"); fflush(stdout);
    Matrix44f Id; Id.SetIdentity();
    MP.InitBBox();
    printf("Completed BBox Scanning \n");
    Box3f fullb = MP.fullBB();
    assert (!fullb.IsNull());
    assert (!fullb.IsEmpty());
    // Compute grid size.
    Point3i gridsize;
    Point3f voxdim;
    fullb.Offset(fullb.Diag() * 0.1 ); // enlarge the bbox by 10% of its diagonal
    voxdim = fullb.max - fullb.min;

    int TotAdd=0,TotMC=0,TotSav=0;
    // if kcell==0 the number of cells is computed starting from required voxel size;
    __int64 cells;
    if(p.NCell>0) cells = (__int64)(p.NCell)*(__int64)(1000);
    else cells = (__int64)(voxdim[0]/p.VoxSize) * (__int64)(voxdim[1]/p.VoxSize) *(__int64)(voxdim[2]/p.VoxSize) ;

    Box3i globalBox;
    {
      Volume<Voxelf> B; // local to this small block
      Box3f fullbf; fullbf.Import(fullb);
      B.Init(cells,fullbf,p.IDiv,p.IPosS);
      B.Dump(stdout);
      if(p.WideSize>0) p.WideNum=p.WideSize/B.voxel.Norm();
      globalBox=B.SubPart;
      // Now the volume has been determined; the quality threshold in absolute units can be computed
      if(p.QualitySmoothAbs==0)
        p.QualitySmoothAbs= p.QualitySmoothVox * B.voxel.Norm();
    }
    bool res=false;
    /*vVV.resize(p.IDiv[0]);
    for(int i=0; i<vVV.size(); i++){
      vVV[i].resize(p.IDiv[1]);
      for(int j=0; j<vVV[i].size(); j++)
        vVV[i][j].resize(p.IDiv[2]);
    }*/
    //#pragma omp parallel for
    for(int xx=p.IPosS[0];xx<=p.IPosE[0];++xx)
      for(int yy=p.IPosS[1];yy<=p.IPosE[1];++yy)
        for(int zz=p.IPosS[2];zz<=p.IPosE[2];++zz)
          if((zz+(yy*p.IDiv[2])+(xx*p.IDiv[2]*p.IDiv[1])) >= (p.IPosB[2]+(p.IPosB[1]*p.IDiv[2])+(p.IPosB[0]*p.IDiv[2]*p.IDiv[1]))) // skip until IPos >= IPosB
          {
            printf("----------- SubBlock %2i %2i %2i ----------\n",xx,yy,zz);
            //Volume<Voxelf> B;
            Volume<Voxelf> VV;// =vVV[xx][yy][zz];
            int t0=clock();
            Box3f fullbf; fullbf.Import(fullb);
            //VV.DeltaVoxelSafe=1;
            Point3i IPos;
            IPos[0]=xx; IPos[1]=yy; IPos[2]=zz;
            VV.Init(cells,fullbf,p.IDiv,IPos);
            printf("\n\n --------------- Allocated subcells. %i\n",VV.Allocated());
            std::string filename=p.basename;
            if(p.IDiv!=Point3i(1,1,1))
            {
              std::string subvoltag;
              VV.GetSubVolumeTag(subvoltag);
              filename+=subvoltag;
            }
            /********** Big loop scanning all the meshes *********/
            for(int i=0;i<MP.size();++i)
            {
              Box3f bbb= MP.bb(i);
              /**********************/
              cb((i+1),MP.size(),startTick,"Vol");
              /**********************/
              // if bbox of mesh #i is part of the subblock, then process it
              if(bbb.Collide(VV.SubBoxSafe))
              {
                SMesh *sm;
                if(!MP.Find(i,sm) )
                {
                  res = InitMesh(VV,*sm,MP.MeshName(i).c_str(),MP.Tr(i));
                  if(!res)
                  {
                    printf("Failed Init of mesh %s",MP.MeshName(i).c_str());
                    //break;
                  }
                }
                res |= AddMeshToVolumeM(VV,*sm, MP.MeshName(i),MP.W(i));
              }
            }

            /* for(int k=VV.SubPart.min[2];k<VV.SubPart.max[2];++k)
              for(int j=VV.SubPart.min[1];j<VV.SubPart.max[1];++j)
                for(int i=VV.SubPart.min[0];i<VV.SubPart.max[0];++i){
                  float fv=VV.V(i,j,k).V();
                  if(fv != 0)
                    printf("aa %f---%d %d %d\n",fv,k,j,i);
                }*/

            //B.Normalize(1);
            printf("End Scanning\n");
            if(p.OffsetFlag)
            {
              VV.Offset(p.OffsetThr);
              if (p.VerboseLevel>0)
              {
                VV.SlicedPPM("finaloff","__",p.SliceNum);
                VV.SlicedPPMQ("finaloff","__",p.SliceNum);
              }
            }
            //if(p.VerboseLevel>1) VV.SlicedPPM(filename.c_str(),SFormat("_%02im",i),p.SliceNum );
            for(int i=0;i<p.RefillNum;++i)
            {
              //VV.Refill(3,6);
              if(p.VerboseLevel>1) VV.SlicedPPM(filename.c_str(),SFormat("_%02imsr",i),p.SliceNum );
              //if(VerboseLevel>1) VV.SlicedPPMQ(filename,SFormat("_%02ips",i++),SliceNum );
            }

            for(int i=0;i<p.SmoothNum;++i)
            {
              Volume <Voxelf> SM;
              SM.Init(VV);
              printf("%2i/%2i: ",i,p.SmoothNum);
              SM.CopySmooth(VV,1,p.QualitySmoothAbs);
              VV=SM;
              // VV.Refill(3,6);
              if(p.VerboseLevel>1) VV.SlicedPPM(filename.c_str(),SFormat("_%02ims",i),p.SliceNum );
            }
            int t1=clock(); //--------
            TotAdd+=t1-t0;
            printf("Extracting surface...\r");
            if (p.VerboseLevel>0)
            {
              VV.SlicedPPM("final","__",p.SliceNum);
              VV.SlicedPPMQ("final","__",p.SliceNum);
            }
            // Persist this subvolume as a raw dump so the later phases
            // (ProcessNormalize / ProcessMC) can reload it from disk.
            std::string fn="test";
            if(1){//VV.div!=Point3i(1,1,1)) {
              std::string subvoltag;
              VV.GetSubVolumeTag(subvoltag);
              fn+=subvoltag;
            }
            std::string datname=fn;
            std::string rawname=fn;
            rawname+=".raw";
            VV.Write(rawname,0,0);
            //MCMesh me;
            //
          }
  }

  // Reads only the header of a raw subvolume dump produced by Volume::Write
  // and reconstructs the "safe" (border-enlarged, clamped) integer sub-box
  // it covers. Exits the process if the file cannot be opened.
  Box3i getBBoxFromFile(std::string filename){
    FILE *fp;
    fp=fopen(filename.c_str(),"rb");
    if(!fp)
    {
      printf("Error: unable ro open output volume file '%s'\n",filename.c_str());
      exit(-1);
    }
    _int64 cells;
    Box3<float> bb;
    Point3i div;
    Point3i pos;
    Point3i sz;
    // Header layout: cell count, float bbox (min,max), division, position, size.
    fread(&cells,sizeof(_int64),1,fp);
    float bbtmp[6];
    fread(&bbtmp[0],sizeof(float),6,fp);
    for(int i=0;i<3; i++) bb.min[i]=bbtmp[i];
    for(int i=0;i<3; i++) bb.max[i]=bbtmp[i+3];
    int pttmp[3];
    fread(&pttmp[0],sizeof(int),3,fp);
    for(int i=0;i<3; i++) div[i]=pttmp[i];
    fread(&pttmp[0],sizeof(int),3,fp);
    for(int i=0;i<3; i++){ pos[i]=pttmp[i]; }
    fread(&pttmp[0],sizeof(int),3,fp);
    for(int i=0;i<3; i++){ sz[i]=pttmp[i]; }
    fclose(fp);

    Box3i SubPart,SubPartSafe;
    // Setting the subpart under analisys
    for(int k=0;k<3;++k)
    {
      SubPart.min[k]= pos[k]*sz[k]/div[k];
      SubPart.max[k]=(pos[k]+1)*sz[k]/div[k];
      // SubBox.min[k]= bbox.min[k]+SubPart.min[k]*voxel[k];
      // SubBox.max[k]= bbox.min[k]+SubPart.max[k]*voxel[k];
    }
    // Setting the Safe Subpart under analisys
    SubPartSafe=SubPart;
    for(int k=0;k<3;++k)
    {
      SubPartSafe.min[k] -= Volume<Voxelf>::BLOCKSIDE();;
      SubPartSafe.max[k] += Volume<Voxelf>::BLOCKSIDE();;
      if( SubPartSafe.min[k]< 0 ) SubPartSafe.min[k] = 0;
      if( SubPartSafe.max[k]> sz[k] ) SubPartSafe.max[k] = sz[k];
      // SubBoxSafe.min[k]= bbox.min[k]+SubPartSafe.min[k]*voxel[k];
      // SubBoxSafe.max[k]= bbox.min[k]+SubPartSafe.max[k]*voxel[k];
    }
    return SubPartSafe;
  }

  // Phase 2: loads every subvolume raw file back in memory and equalizes the
  // voxel values in the overlapping safe borders of neighbouring subvolumes
  // (blending disagreeing values) so the later marching-cubes extraction is
  // seam-free; rewrites only the subvolumes that changed.
  void ProcessNormalize(CallBackPosTotal *cb=0)
  {
    int cnt=0;
    std::vector< std::vector<std::vector<Volume<Voxelf> > > >vVV;
    vVV.resize(p.IDiv[0]);
    for(int i=0; i<vVV.size(); i++){
      vVV[i].resize(p.IDiv[1]);
      for(int j=0; j<vVV[i].size(); j++)
        vVV[i][j].resize(p.IDiv[2]);
    }
    // First pass: load all the subvolume dumps written by ProcessCells.
    for(int xx=p.IPosS[0];xx<=p.IPosE[0];++xx)
      for(int yy=p.IPosS[1];yy<=p.IPosE[1];++yy)
        for(int zz=p.IPosS[2];zz<=p.IPosE[2];++zz)
          if((zz+(yy*p.IDiv[2])+(xx*p.IDiv[2]*p.IDiv[1])) >= (p.IPosB[2]+(p.IPosB[1]*p.IDiv[2])+(p.IPosB[0]*p.IDiv[2]*p.IDiv[1]))) // skip until IPos >= IPosB
          {
            std::string fn="test";
            if(1){//VV.div!=Point3i(1,1,1)) {
              std::string subvoltag;
              Point3i pos(xx,yy,zz);
              GetSubVolumeTag(p.IDiv,pos,subvoltag);
              fn+=subvoltag;
            }
            std::string datname=fn;
            std::string rawname=fn;
            rawname+=".raw";
            Box3i ibox;
            printf("Loading %s\n ",rawname.c_str());
            vVV[xx][yy][zz].Read(rawname);
          }
    printf("Done Loading\n");
    //#pragma omp parallel for
    // Second pass: for each subvolume, compare against every other subvolume
    // in the same z-slab and reconcile the voxels in their intersection.
    for(int xx=p.IPosS[0];xx<=p.IPosE[0];++xx)
      for(int yy=p.IPosS[1];yy<=p.IPosE[1];++yy)
        for(int zz=p.IPosS[2];zz<=p.IPosE[2];++zz)
          if((zz+(yy*p.IDiv[2])+(xx*p.IDiv[2]*p.IDiv[1])) >= (p.IPosB[2]+(p.IPosB[1]*p.IDiv[2])+(p.IPosB[0]*p.IDiv[2]*p.IDiv[1]))) // skip until IPos >= IPosB
          {
            std::string fn="test";
            Volume<Voxelf> &VV= vVV[xx][yy][zz];
            if(1){//VV.div!=Point3i(1,1,1)) {
              std::string subvoltag;
              Point3i pos(xx,yy,zz);
              GetSubVolumeTag(p.IDiv,pos,subvoltag);
              fn+=subvoltag;
            }
            std::string datname=fn;
            std::string rawname=fn;
            rawname+=".raw";
            Box3i ibox;
            // VV.Read(rawname);
            bool madeChange=false;
            for(int xxx=p.IPosS[0];xxx<=p.IPosE[0];++xxx)
              for(int yyy=p.IPosS[1];yyy<=p.IPosE[1];++yyy){
                if(xxx==xx && yyy==yy)
                  continue;
                std::string fn_comapre="test";
                if(1){//VV.div!=Point3i(1,1,1)) {
                  std::string subvoltag_compare;
                  Point3i pos(xxx,yyy,zz);
                  GetSubVolumeTag(p.IDiv,pos,subvoltag_compare);
                  fn_comapre+=subvoltag_compare;
                  fn_comapre+=".raw";
                }
                Volume<Voxelf> &VV_compare=vVV[xxx][yyy][zz];;
                Box3i SubPartSafeCompare=VV_compare.SubPartSafe;
                // Box3i SubPartSafeCompare=getBBoxFromFile(fn_comapre);
                if(!SubPartSafeCompare.Collide(VV.SubPartSafe))
                  continue;
                // VV_compare.Read(fn_comapre);
                // ibox = intersection of the two safe sub-boxes.
                ibox.min[0] = std::max(SubPartSafeCompare.min[0],VV.SubPartSafe.min[0]);
                ibox.min[1] = std::max(SubPartSafeCompare.min[1],VV.SubPartSafe.min[1]);
                ibox.min[2] = std::max(SubPartSafeCompare.min[2],VV.SubPartSafe.min[2]);
                ibox.max[0] = std::min(SubPartSafeCompare.max[0],VV.SubPartSafe.max[0]);
                ibox.max[1] = std::min(SubPartSafeCompare.max[1],VV.SubPartSafe.max[1]);
                ibox.max[2] = std::min(SubPartSafeCompare.max[2],VV.SubPartSafe.max[2]);
                // ibox=globalBox;
                /* printf("%d %d %d -- %d %d %d\n",ibox.min[0],ibox.min[1],ibox.min[2], ibox.max[0],ibox.max[1],ibox.max[2]);
                   printf("A %d %d %d -- %d %d %d\n",SubPartSafe.min[0],SubPartSafe.min[1],SubPartSafe.min[2], SubPartSafe.max[0],SubPartSafe.max[1],SubPartSafe.max[2]);
                   printf("B %d %d %d -- %d %d %d\n",VV.SubPartSafe.min[0],VV.SubPartSafe.min[1],VV.SubPartSafe.min[2], VV.SubPartSafe.max[0],VV.SubPartSafe.max[1],VV.SubPartSafe.max[2]);*/
                for(int xxxx=ibox.min[0];xxxx<=ibox.max[0];++xxxx)
                  for(int yyyy=ibox.min[1];yyyy<=ibox.max[1];++yyyy)
                    for(int zzzz=ibox.min[2];zzzz<=ibox.max[2];++zzzz){
                      // printf("%d %d %d\n",xxxx,yyyy,zzzz);
                      if(VV.Val(xxxx,yyyy,zzzz) == 0.0)
                        continue;
                      if(VV_compare.Val(xxxx,yyyy,zzzz)!=VV.Val(xxxx,yyyy,zzzz)){
#pragma omp critical
                        {
                          // printf("%f %f\n",VV_compare.Val(xxxx,yyyy,zzzz),VV.Val(xxxx,yyyy,zzzz));
                          // 1000.000 appears to act as the "unset" sentinel value:
                          // an unset voxel adopts the neighbour's value, otherwise
                          // the two values are blended 50/50 — TODO confirm against Voxelfc.
                          if(VV_compare.Val(xxxx,yyyy,zzzz) == 1000.000)
                            ;//VV_compare.V(xxxx,yyyy,zzzz).Set(VV.V(xxxx,yyyy,zzzz));
                          else if(VV.Val(xxxx,yyyy,zzzz) == 1000.000)
                            VV.V(xxxx,yyyy,zzzz).Set(VV_compare.V(xxxx,yyyy,zzzz));
                          else{
                            VV_compare.V(xxxx,yyyy,zzzz).Blend(VV.V(xxxx,yyyy,zzzz),0.5);
                            VV.V(xxxx,yyyy,zzzz).Set( VV_compare.V(xxxx,yyyy,zzzz));
                          }
                          madeChange=true;
                        }
                      }
                      // vVV[xxx][yyy][zz].V(xxxx,yyyy,zzzz).Set(VV.V(xxxx,yyyy,zzzz));
                      // VV.V(xxxx,yyyy,zzzz).SetB(false);
                    }
              }
            /* std::string filename="final";
               if(p.IDiv!=Point3i(1,1,1))
               {
                 std::string subvoltag;
                 VV.GetSubVolumeTag(subvoltag);
                 filename+=subvoltag;
               }
               VV.SlicedPPM(filename.c_str(),"__",1);
               VV.SlicedPPMQ(filename.c_str(),"__",1);
               VV.Dump(stdout);*/
            if(madeChange)
              VV.Write(rawname,0,0);
            printf("----------- Equalizing corner SubBlock %2i %2i %2i ----------\n",xx,yy,zz);
            //Volume<Voxelf> B;
          }
    //#pragma omp parallel for
  }

  // Phase 3: reloads each (normalized) subvolume, runs marching cubes on it,
  // discards the triangles outside the owned (non-safe) sub-box, converts the
  // mesh back to world coordinates and saves it as PLY (optionally followed by
  // a simplified copy). Output file names are accumulated in p.OutNameVec /
  // p.OutNameSimpVec.
  // NOTE(review): as in ProcessCells, `cb` is called unconditionally.
  void ProcessMC(CallBackPosTotal *cb=0)
  {
    int TotAdd=0,TotMC=0,TotSav=0;
    for(int xx=p.IPosS[0];xx<=p.IPosE[0];++xx)
      for(int yy=p.IPosS[1];yy<=p.IPosE[1];++yy)
        for(int zz=p.IPosS[2];zz<=p.IPosE[2];++zz)
          if((zz+(yy*p.IDiv[2])+(xx*p.IDiv[2]*p.IDiv[1])) >= (p.IPosB[2]+(p.IPosB[1]*p.IDiv[2])+(p.IPosB[0]*p.IDiv[2]*p.IDiv[1]))) // skip until IPos >= IPosB
          {
            //Volume<Voxelf> &VV =vVV[xx][yy][zz];
            Volume<Voxelf> VV;
            std::string fn="test";
            if(1){//VV.div!=Point3i(1,1,1)) {
              std::string subvoltag;
              Point3i pos(xx,yy,zz);
              GetSubVolumeTag(p.IDiv,pos,subvoltag);
              fn+=subvoltag;
            }
            std::string datname=fn;
            std::string rawname=fn;
            rawname+=".raw";
            VV.Read(rawname);

            std::string filename=p.basename;
            if(p.IDiv!=Point3i(1,1,1))
            {
              std::string subvoltag;
              VV.GetSubVolumeTag(subvoltag);
              filename+=subvoltag;
            }
            bool res=true;
            MCMesh me;
            if(res)
            {
              typedef vcg::tri::TrivialWalker<MCMesh, Volume <Voxelf> > Walker;
              typedef vcg::tri::MarchingCubes<MCMesh, Walker> MarchingCubes;
              //typedef vcg::tri::ExtendedMarchingCubes<MCMesh, Walker> ExtendedMarchingCubes;
              Walker walker;
              MarchingCubes mc(me, walker);
              Box3i currentSubBox=VV.SubPartSafe;
              Point3i currentSubBoxRes=VV.ssz;
              /**********************/
              cb(50,50,0,"Step 2: Marching Cube...");
              /**********************/
              walker.BuildMesh(me,VV,mc,currentSubBox,currentSubBoxRes);

              // Keep only geometry inside the owned sub-box (the safe border is
              // duplicated in neighbouring subvolumes) and go back from the
              // integer grid space to world coordinates.
              typename MCMesh::VertexIterator vi;
              Box3f bbb; bbb.Import(VV.SubPart);
              for(vi=me.vert.begin();vi!=me.vert.end();++vi)
              {
                if(!bbb.IsIn((*vi).P()))
                  vcg::tri::Allocator< MCMesh >::DeleteVertex(me,*vi);
                VV.DeInterize((*vi).P());
              }
              typename MCMesh::FaceIterator fi;
              for (fi = me.face.begin(); fi != me.face.end(); ++fi)
              {
                if((*fi).V(0)->IsD() || (*fi).V(1)->IsD() || (*fi).V(2)->IsD() )
                  vcg::tri::Allocator< MCMesh >::DeleteFace(me,*fi);
                else std::swap((*fi).V1(0), (*fi).V2(0)); // flip face orientation
              }
              int t2=clock(); //--------
              // TotMC+=t2-t1;
              if(me.vn >0 || me.fn >0)
              {
                p.OutNameVec.push_back(filename+std::string(".ply"));
                int saveMask=0;
                if(p.MergeColor) saveMask |= tri::io::Mask::IOM_VERTCOLOR ;
                tri::io::ExporterPLY<MCMesh>::Save(me,p.OutNameVec.back().c_str(),saveMask);
                if(p.SimplificationFlag)
                {
                  /**********************/
                  cb(50,50,0,"Step 3: Simplify mesh...");
                  /**********************/
                  p.OutNameSimpVec.push_back(filename+std::string(".d.ply"));
                  me.face.EnableVFAdjacency();
                  MCSimplify<MCMesh>(me, VV.voxel[0]/4.0);
                  tri::Allocator<MCMesh>::CompactFaceVector(me);
                  me.face.EnableFFAdjacency();
                  tri::Clean<MCMesh>::RemoveTVertexByFlip(me,20,true);
                  tri::Clean<MCMesh>::RemoveFaceFoldByFlip(me);
                  tri::io::ExporterPLY<MCMesh>::Save(me,p.OutNameSimpVec.back().c_str(),saveMask);
                }
              }
              int t3=clock(); //--------
              TotSav+=t3-t2;
            }

            printf("Mesh Saved '%s': %8d vertices, %8d faces \n",(filename+std::string(".ply")).c_str(),me.vn,me.fn);
            printf("Adding Meshes %8i\n",TotAdd);
            printf("MC %8i\n",TotMC);
            printf("Saving %8i\n",TotSav);
            printf("Total %8i\n",TotAdd+TotMC+TotSav);
          }
          else
          {
            printf("----------- skipping SubBlock %2i %2i %2i ----------\n",xx,yy,zz);
          }
  }
}; //end PlyMC class

// Edge-collapse specialization used by MCSimplify (CRTP over MCTriEdgeCollapse).
template < class MeshType>
class PlyMCTriEdgeCollapse: public MCTriEdgeCollapse< MeshType, PlyMCTriEdgeCollapse<MeshType> > {
public:
  typedef MCTriEdgeCollapse< MeshType, PlyMCTriEdgeCollapse > MCTEC;
  typedef typename MeshType::VertexType::EdgeType EdgeType;
  inline PlyMCTriEdgeCollapse( const EdgeType &p, int i) :MCTEC(p,i){}
};

// Simplifies a marching-cubes mesh by iterated edge collapse until the error
// metric reaches `absoluteError` (if 0, the error is guessed as 1/4 of the MC
// cell side, estimated from the z-coordinates of the vertices).
template< class MeshType>
void MCSimplify( MeshType &m, float absoluteError, bool preserveBB, vcg::CallBackPos *cb)
{
  typedef PlyMCTriEdgeCollapse<MeshType> MyColl;

  tri::UpdateBounding<MeshType>::Box(m);
  tri::UpdateTopology<MeshType>::VertexFace(m);
  vcg::LocalOptimization<MeshType> DeciSession(m);
  MyColl::bb()=m.bbox;
  MyColl::preserveBBox()=preserveBB;
  if(absoluteError==0)
  {
    // guess the mc side.
    // In a MC mesh the vertices are on the egdes of the cells. and the edges are (mostly) on face of the cells.
    // If you have 2 vert over the same face xy they share z
    std::vector<float> ZSet;
    typename MeshType::FaceIterator fi;
    for(fi = m.face.begin();fi!=m.face.end();++fi)
      if(!(*fi).IsD())
      {
        Point3f v0=(*fi).V(0)->P();
        Point3f v1=(*fi).V(1)->P();
        Point3f v2=(*fi).V(2)->P();
        // NOTE(review): the last two conditions look like copy-paste slips —
        // `v0[1]!=v1[1]`/`v1[1]!=v1[1]` (always false) and `v2[0]!=v2[0]`
        // (always false) were presumably meant to compare v0/v1 against v2.
        // Left untouched here; at worst fewer z samples are collected.
        if(v0[2]==v1[2] && v0[1]!=v1[1] && v0[0]!=v1[0]) ZSet.push_back(v0[2]);
        if(v0[2]==v2[2] && v0[1]!=v1[1] && v2[0]!=v2[0]) ZSet.push_back(v0[2]);
        if(v1[2]==v2[2] && v1[1]!=v1[1] && v2[0]!=v2[0]) ZSet.push_back(v0[2]);
        if(ZSet.size()>100) break;
      }
    std::sort(ZSet.begin(),ZSet.end());
    std::vector<float>::iterator lastV = std::unique(ZSet.begin(),ZSet.end());
    ZSet.resize(lastV-ZSet.begin());
    float Delta=0;
    // Largest gap between consecutive distinct z values ~ MC cell side.
    for(size_t i = 0; i< ZSet.size()-1;++i)
    {
      Delta = std::max(ZSet[i+1]-ZSet[i],Delta);
      //qDebug("%f",Delta);
    }
    absoluteError= Delta/4.0f;
  }
  //qDebug("Simplifying at absoluteError=%f",absoluteError);

  float TargetError = absoluteError;
  char buf[1024];
  DeciSession.template Init< MyColl > ();

  MyColl::areaThr()=TargetError*TargetError;
  DeciSession.SetTimeBudget(1.0f); // process in 1-second batches so cb() can report progress
  if(TargetError < std::numeric_limits<float>::max() ) DeciSession.SetTargetMetric(TargetError);
  while(DeciSession.DoOptimization() && DeciSession.currMetric < TargetError)
  {
    sprintf(buf,"Simplyfing %7i err %9g \r",m.fn,DeciSession.currMetric);
    if (cb) cb(int(100.0f*DeciSession.currMetric/TargetError),buf);
  }
}

} // end namespace tri
} // end namespace vcg
#endif
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 4; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,16);t1++) { lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32)); ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(32*t2-Nz,4)),4*t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(16*t1+Ny+29,4)),floord(32*t2+Ny+28,4)),floord(32*t1-32*t2+Nz+Ny+27,4));t3++) { for (t4=max(max(max(0,ceild(t1-7,8)),ceild(32*t2-Nz-124,128)),ceild(4*t3-Ny-124,128));t4<=min(min(min(min(floord(4*t3+Nx,128),floord(Nt+Nx-4,128)),floord(16*t1+Nx+29,128)),floord(32*t2+Nx+28,128)),floord(32*t1-32*t2+Nz+Nx+27,128));t4++) { for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),4*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),4*t3+2),128*t4+126),32*t1-32*t2+Nz+29);t5++) { for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) { lbv=max(128*t4,t5+1); ubv=min(128*t4+127,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));; } } } } } } } 
} } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
ASTMatchers.h
//===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file implements matchers to be used together with the MatchFinder to // match AST nodes. // // Matchers are created by generator functions, which can be combined in // a functional in-language DSL to express queries over the C++ AST. // // For example, to match a class with a certain name, one would call: // cxxRecordDecl(hasName("MyClass")) // which returns a matcher that can be used to find all AST nodes that declare // a class named 'MyClass'. // // For more complicated match expressions we're often interested in accessing // multiple parts of the matched AST nodes once a match is found. In that case, // call `.bind("name")` on match expressions that match the nodes you want to // access. // // For example, when we're interested in child classes of a certain class, we // would write: // cxxRecordDecl(hasName("MyClass"), has(recordDecl().bind("child"))) // When the match is found via the MatchFinder, a user provided callback will // be called with a BoundNodes instance that contains a mapping from the // strings that we provided for the `.bind()` calls to the nodes that were // matched. // In the given example, each time our matcher finds a match we get a callback // where "child" is bound to the RecordDecl node of the matching child // class declaration. // // See ASTMatchersInternal.h for a more in-depth explanation of the // implementation details of the matcher framework. // // See ASTMatchFinder.h for how to use the generated matchers to run over // an AST. 
// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #include "clang/AST/ASTContext.h" #include "clang/AST/ASTTypeTraits.h" #include "clang/AST/Attr.h" #include "clang/AST/CXXInheritance.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclFriend.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/LambdaCapture.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/OperationKinds.h" #include "clang/AST/ParentMapContext.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtObjC.h" #include "clang/AST/StmtOpenMP.h" #include "clang/AST/TemplateBase.h" #include "clang/AST/TemplateName.h" #include "clang/AST/Type.h" #include "clang/AST/TypeLoc.h" #include "clang/ASTMatchers/ASTMatchersInternal.h" #include "clang/ASTMatchers/ASTMatchersMacros.h" #include "clang/Basic/AttrKinds.h" #include "clang/Basic/ExceptionSpecificationType.h" #include "clang/Basic/FileManager.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TypeTraits.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Regex.h" #include <cassert> #include <cstddef> #include <iterator> #include <limits> #include <string> #include <utility> #include <vector> namespace clang { namespace ast_matchers { /// Maps string IDs to AST nodes matched by parts of a matcher. 
/// /// The bound nodes are generated by calling \c bind("id") on the node matchers /// of the nodes we want to access later. /// /// The instances of BoundNodes are created by \c MatchFinder when the user's /// callbacks are executed every time a match is found. class BoundNodes { public: /// Returns the AST node bound to \c ID. /// /// Returns NULL if there was no node bound to \c ID or if there is a node but /// it cannot be converted to the specified type. template <typename T> const T *getNodeAs(StringRef ID) const { return MyBoundNodes.getNodeAs<T>(ID); } /// Type of mapping from binding identifiers to bound nodes. This type /// is an associative container with a key type of \c std::string and a value /// type of \c clang::DynTypedNode using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap; /// Retrieve mapping from binding identifiers to bound nodes. const IDToNodeMap &getMap() const { return MyBoundNodes.getMap(); } private: friend class internal::BoundNodesTreeBuilder; /// Create BoundNodes from a pre-filled map of bindings. BoundNodes(internal::BoundNodesMap &MyBoundNodes) : MyBoundNodes(MyBoundNodes) {} internal::BoundNodesMap MyBoundNodes; }; /// Types of matchers for the top-level classes in the AST class /// hierarchy. /// @{ using DeclarationMatcher = internal::Matcher<Decl>; using StatementMatcher = internal::Matcher<Stmt>; using TypeMatcher = internal::Matcher<QualType>; using TypeLocMatcher = internal::Matcher<TypeLoc>; using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>; using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>; using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>; /// @} /// Matches any node. /// /// Useful when another matcher requires a child matcher, but there's no /// additional constraint. This will often be used with an explicit conversion /// to an \c internal::Matcher<> type such as \c TypeMatcher. 
/// /// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g., /// \code /// "int* p" and "void f()" in /// int* p; /// void f(); /// \endcode /// /// Usable as: Any Matcher inline internal::TrueMatcher anything() { return internal::TrueMatcher(); } /// Matches the top declaration context. /// /// Given /// \code /// int X; /// namespace NS { /// int Y; /// } // namespace NS /// \endcode /// decl(hasDeclContext(translationUnitDecl())) /// matches "int X", but not "int Y". extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl> translationUnitDecl; /// Matches typedef declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typedefDecl() /// matches "typedef int X", but not "using Y = int" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl> typedefDecl; /// Matches typedef name declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typedefNameDecl() /// matches "typedef int X" and "using Y = int" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl> typedefNameDecl; /// Matches type alias declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typeAliasDecl() /// matches "using Y = int", but not "typedef int X" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl> typeAliasDecl; /// Matches type alias template declarations. /// /// typeAliasTemplateDecl() matches /// \code /// template <typename T> /// using Y = X<T>; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl> typeAliasTemplateDecl; /// Matches AST nodes that were expanded within the main-file. 
/// /// Example matches X but not Y /// (matcher = cxxRecordDecl(isExpansionInMainFile()) /// \code /// #include <Y.h> /// class X {}; /// \endcode /// Y.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInMainFile, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); return SourceManager.isInMainFile( SourceManager.getExpansionLoc(Node.getBeginLoc())); } /// Matches AST nodes that were expanded within system-header-files. /// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInSystemHeader()) /// \code /// #include <SystemHeader.h> /// class X {}; /// \endcode /// SystemHeader.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } return SourceManager.isInSystemHeader(ExpansionLoc); } /// Matches AST nodes that were expanded within files whose name is /// partially matching a given regex. 
/// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*")) /// \code /// #include "ASTMatcher.h" /// class X {}; /// \endcode /// ASTMatcher.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER_P(isExpansionInFileMatching, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc), std::string, RegExp) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } auto FileEntry = SourceManager.getFileEntryForID(SourceManager.getFileID(ExpansionLoc)); if (!FileEntry) { return false; } auto Filename = FileEntry->getName(); llvm::Regex RE(RegExp); return RE.match(Filename); } /// Matches statements that are (transitively) expanded from the named macro. /// Does not match if only part of the statement is expanded from that macro or /// if different parts of the the statement are expanded from different /// appearances of the macro. /// /// FIXME: Change to be a polymorphic matcher that works on any syntactic /// node. There's nothing `Stmt`-specific about it. AST_MATCHER_P(Stmt, isExpandedFromMacro, llvm::StringRef, MacroName) { // Verifies that the statement' beginning and ending are both expanded from // the same instance of the given macro. auto& Context = Finder->getASTContext(); llvm::Optional<SourceLocation> B = internal::getExpansionLocOfMacro(MacroName, Node.getBeginLoc(), Context); if (!B) return false; llvm::Optional<SourceLocation> E = internal::getExpansionLocOfMacro(MacroName, Node.getEndLoc(), Context); if (!E) return false; return *B == *E; } /// Matches declarations. 
/// /// Examples matches \c X, \c C, and the friend declaration inside \c C; /// \code /// void X(); /// class C { /// friend X; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<Decl> decl; /// Matches a declaration of a linkage specification. /// /// Given /// \code /// extern "C" {} /// \endcode /// linkageSpecDecl() /// matches "extern "C" {}" extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl> linkageSpecDecl; /// Matches a declaration of anything that could have a name. /// /// Example matches \c X, \c S, the anonymous union type, \c i, and \c U; /// \code /// typedef int X; /// struct S { /// union { /// int i; /// } U; /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl; /// Matches a declaration of label. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// labelDecl() /// matches 'FOO:' extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl; /// Matches a declaration of a namespace. /// /// Given /// \code /// namespace {} /// namespace test {} /// \endcode /// namespaceDecl() /// matches "namespace {}" and "namespace test {}" extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl> namespaceDecl; /// Matches a declaration of a namespace alias. /// /// Given /// \code /// namespace test {} /// namespace alias = ::test; /// \endcode /// namespaceAliasDecl() /// matches "namespace alias" but not "namespace test" extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl> namespaceAliasDecl; /// Matches class, struct, and union declarations. /// /// Example matches \c X, \c Z, \c U, and \c S /// \code /// class X; /// template<class T> class Z {}; /// struct S {}; /// union U {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl; /// Matches C++ class declarations. 
/// /// Example matches \c X, \c Z /// \code /// class X; /// template<class T> class Z {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl> cxxRecordDecl; /// Matches C++ class template declarations. /// /// Example matches \c Z /// \code /// template<class T> class Z {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl> classTemplateDecl; /// Matches C++ class template specializations. /// /// Given /// \code /// template<typename T> class A {}; /// template<> class A<double> {}; /// A<int> a; /// \endcode /// classTemplateSpecializationDecl() /// matches the specializations \c A<int> and \c A<double> extern const internal::VariadicDynCastAllOfMatcher< Decl, ClassTemplateSpecializationDecl> classTemplateSpecializationDecl; /// Matches C++ class template partial specializations. /// /// Given /// \code /// template<class T1, class T2, int I> /// class A {}; /// /// template<class T, int I> /// class A<T, T*, I> {}; /// /// template<> /// class A<int, int, 1> {}; /// \endcode /// classTemplatePartialSpecializationDecl() /// matches the specialization \c A<T,T*,I> but not \c A<int,int,1> extern const internal::VariadicDynCastAllOfMatcher< Decl, ClassTemplatePartialSpecializationDecl> classTemplatePartialSpecializationDecl; /// Matches declarator declarations (field, variable, function /// and non-type template parameter declarations). /// /// Given /// \code /// class X { int y; }; /// \endcode /// declaratorDecl() /// matches \c int y. extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl> declaratorDecl; /// Matches parameter variable declarations. /// /// Given /// \code /// void f(int x); /// \endcode /// parmVarDecl() /// matches \c int x. extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl> parmVarDecl; /// Matches C++ access specifier declarations. 
/// /// Given /// \code /// class C { /// public: /// int a; /// }; /// \endcode /// accessSpecDecl() /// matches 'public:' extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl> accessSpecDecl; /// Matches constructor initializers. /// /// Examples matches \c i(42). /// \code /// class C { /// C() : i(42) {} /// int i; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<CXXCtorInitializer> cxxCtorInitializer; /// Matches template arguments. /// /// Given /// \code /// template <typename T> struct C {}; /// C<int> c; /// \endcode /// templateArgument() /// matches 'int' in C<int>. extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument; /// Matches template name. /// /// Given /// \code /// template <typename T> class X { }; /// X<int> xi; /// \endcode /// templateName() /// matches 'X' in X<int>. extern const internal::VariadicAllOfMatcher<TemplateName> templateName; /// Matches non-type template parameter declarations. /// /// Given /// \code /// template <typename T, int N> struct C {}; /// \endcode /// nonTypeTemplateParmDecl() /// matches 'N', but not 'T'. extern const internal::VariadicDynCastAllOfMatcher<Decl, NonTypeTemplateParmDecl> nonTypeTemplateParmDecl; /// Matches template type parameter declarations. /// /// Given /// \code /// template <typename T, int N> struct C {}; /// \endcode /// templateTypeParmDecl() /// matches 'T', but not 'N'. extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl> templateTypeParmDecl; /// Matches public C++ declarations and C++ base specifers that specify public /// inheritance. 
/// /// Examples: /// \code /// class C { /// public: int a; // fieldDecl(isPublic()) matches 'a' /// protected: int b; /// private: int c; /// }; /// \endcode /// /// \code /// class Base {}; /// class Derived1 : public Base {}; // matches 'Base' /// struct Derived2 : Base {}; // matches 'Base' /// \endcode AST_POLYMORPHIC_MATCHER(isPublic, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, CXXBaseSpecifier)) { return getAccessSpecifier(Node) == AS_public; } /// Matches protected C++ declarations and C++ base specifers that specify /// protected inheritance. /// /// Examples: /// \code /// class C { /// public: int a; /// protected: int b; // fieldDecl(isProtected()) matches 'b' /// private: int c; /// }; /// \endcode /// /// \code /// class Base {}; /// class Derived : protected Base {}; // matches 'Base' /// \endcode AST_POLYMORPHIC_MATCHER(isProtected, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, CXXBaseSpecifier)) { return getAccessSpecifier(Node) == AS_protected; } /// Matches private C++ declarations and C++ base specifers that specify private /// inheritance. /// /// Examples: /// \code /// class C { /// public: int a; /// protected: int b; /// private: int c; // fieldDecl(isPrivate()) matches 'c' /// }; /// \endcode /// /// \code /// struct Base {}; /// struct Derived1 : private Base {}; // matches 'Base' /// class Derived2 : Base {}; // matches 'Base' /// \endcode AST_POLYMORPHIC_MATCHER(isPrivate, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, CXXBaseSpecifier)) { return getAccessSpecifier(Node) == AS_private; } /// Matches non-static data members that are bit-fields. /// /// Given /// \code /// class C { /// int a : 2; /// int b; /// }; /// \endcode /// fieldDecl(isBitField()) /// matches 'int a;' but not 'int b;'. AST_MATCHER(FieldDecl, isBitField) { return Node.isBitField(); } /// Matches non-static data members that are bit-fields of the specified /// bit width. 
/// /// Given /// \code /// class C { /// int a : 2; /// int b : 4; /// int c : 2; /// }; /// \endcode /// fieldDecl(hasBitWidth(2)) /// matches 'int a;' and 'int c;' but not 'int b;'. AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) { return Node.isBitField() && Node.getBitWidthValue(Finder->getASTContext()) == Width; } /// Matches non-static data members that have an in-class initializer. /// /// Given /// \code /// class C { /// int a = 2; /// int b = 3; /// int c; /// }; /// \endcode /// fieldDecl(hasInClassInitializer(integerLiteral(equals(2)))) /// matches 'int a;' but not 'int b;'. /// fieldDecl(hasInClassInitializer(anything())) /// matches 'int a;' and 'int b;' but not 'int c;'. AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr *Initializer = Node.getInClassInitializer(); return (Initializer != nullptr && InnerMatcher.matches(*Initializer, Finder, Builder)); } /// Determines whether the function is "main", which is the entry point /// into an executable program. AST_MATCHER(FunctionDecl, isMain) { return Node.isMain(); } /// Matches the specialized template of a specialization declaration. /// /// Given /// \code /// template<typename T> class A {}; #1 /// template<> class A<int> {}; #2 /// \endcode /// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl())) /// matches '#2' with classTemplateDecl() matching the class template /// declaration of 'A' at #1. AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate, internal::Matcher<ClassTemplateDecl>, InnerMatcher) { const ClassTemplateDecl* Decl = Node.getSpecializedTemplate(); return (Decl != nullptr && InnerMatcher.matches(*Decl, Finder, Builder)); } /// Matches a declaration that has been implicitly added /// by the compiler (eg. implicit default/copy constructors). 
AST_MATCHER(Decl, isImplicit) { return Node.isImplicit(); }

/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl that have at least one TemplateArgument matching the given
/// InnerMatcher.
///
/// Given
/// \code
///   template<typename T> class A {};
///   template<> class A<double> {};
///   A<int> a;
///
///   template<typename T> void f() {};
///   void func() { f<int>(); };
/// \endcode
///
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
///     refersToType(asString("int"))))
///   matches the specialization \c A<int>
///
/// functionDecl(hasAnyTemplateArgument(refersToType(asString("int"))))
///   matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P(
    hasAnyTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    internal::Matcher<TemplateArgument>, InnerMatcher) {
  // Uniform access to the argument list across the three supported node kinds.
  ArrayRef<TemplateArgument> List =
      internal::getTemplateSpecializationArgs(Node);
  return matchesFirstInRange(InnerMatcher, List.begin(), List.end(), Finder,
                             Builder);
}

/// Causes all nested matchers to be matched with the specified traversal kind.
///
/// Given
/// \code
///   void foo()
///   {
///       int i = 3.0;
///   }
/// \endcode
/// The matcher
/// \code
///   traverse(TK_IgnoreImplicitCastsAndParentheses,
///     varDecl(hasInitializer(floatLiteral().bind("init")))
///   )
/// \endcode
/// matches the variable declaration with "init" bound to the "3.0".
template <typename T> internal::Matcher<T> traverse(TraversalKind TK, const internal::Matcher<T> &InnerMatcher) { return internal::DynTypedMatcher::constructRestrictedWrapper( new internal::TraversalMatcher<T>(TK, InnerMatcher), InnerMatcher.getID().first) .template unconditionalConvertTo<T>(); } template <typename T> internal::BindableMatcher<T> traverse(TraversalKind TK, const internal::BindableMatcher<T> &InnerMatcher) { return internal::BindableMatcher<T>( internal::DynTypedMatcher::constructRestrictedWrapper( new internal::TraversalMatcher<T>(TK, InnerMatcher), InnerMatcher.getID().first) .template unconditionalConvertTo<T>()); } template <typename... T> internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>> traverse(TraversalKind TK, const internal::VariadicOperatorMatcher<T...> &InnerMatcher) { return internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>( TK, InnerMatcher); } template <template <typename ToArg, typename FromArg> class ArgumentAdapterT, typename T, typename ToTypes> internal::TraversalWrapper< internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>> traverse(TraversalKind TK, const internal::ArgumentAdaptingMatcherFuncAdaptor< ArgumentAdapterT, T, ToTypes> &InnerMatcher) { return internal::TraversalWrapper< internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>>(TK, InnerMatcher); } template <template <typename T, typename P1> class MatcherT, typename P1, typename ReturnTypesF> internal::TraversalWrapper< internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>> traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam1< MatcherT, P1, ReturnTypesF> &InnerMatcher) { return internal::TraversalWrapper< internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>( TK, InnerMatcher); } template <template <typename T, typename P1, typename P2> class MatcherT, typename P1, typename P2, typename ReturnTypesF> internal::TraversalWrapper< 
internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>> traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam2< MatcherT, P1, P2, ReturnTypesF> &InnerMatcher) { return internal::TraversalWrapper< internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>( TK, InnerMatcher); } /// Matches expressions that match InnerMatcher after any implicit AST /// nodes are stripped off. /// /// Parentheses and explicit casts are not discarded. /// Given /// \code /// class C {}; /// C a = C(); /// C b; /// C c = b; /// \endcode /// The matchers /// \code /// varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr()))) /// \endcode /// would match the declarations for a, b, and c. /// While /// \code /// varDecl(hasInitializer(cxxConstructExpr())) /// \endcode /// only match the declarations for b and c. AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreImplicit(), Finder, Builder); } /// Matches expressions that match InnerMatcher after any implicit casts /// are stripped off. /// /// Parentheses and explicit casts are not discarded. /// Given /// \code /// int arr[5]; /// int a = 0; /// char b = 0; /// const int c = a; /// int *d = arr; /// long e = (long) 0l; /// \endcode /// The matchers /// \code /// varDecl(hasInitializer(ignoringImpCasts(integerLiteral()))) /// varDecl(hasInitializer(ignoringImpCasts(declRefExpr()))) /// \endcode /// would match the declarations for a, b, c, and d, but not e. /// While /// \code /// varDecl(hasInitializer(integerLiteral())) /// varDecl(hasInitializer(declRefExpr())) /// \endcode /// only match the declarations for b, c, and d. AST_MATCHER_P(Expr, ignoringImpCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreImpCasts(), Finder, Builder); } /// Matches expressions that match InnerMatcher after parentheses and /// casts are stripped off. 
/// /// Implicit and non-C Style casts are also discarded. /// Given /// \code /// int a = 0; /// char b = (0); /// void* c = reinterpret_cast<char*>(0); /// char d = char(0); /// \endcode /// The matcher /// varDecl(hasInitializer(ignoringParenCasts(integerLiteral()))) /// would match the declarations for a, b, c, and d. /// while /// varDecl(hasInitializer(integerLiteral())) /// only match the declaration for a. AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreParenCasts(), Finder, Builder); } /// Matches expressions that match InnerMatcher after implicit casts and /// parentheses are stripped off. /// /// Explicit casts are not discarded. /// Given /// \code /// int arr[5]; /// int a = 0; /// char b = (0); /// const int c = a; /// int *d = (arr); /// long e = ((long) 0l); /// \endcode /// The matchers /// varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral()))) /// varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr()))) /// would match the declarations for a, b, c, and d, but not e. /// while /// varDecl(hasInitializer(integerLiteral())) /// varDecl(hasInitializer(declRefExpr())) /// would only match the declaration for a. AST_MATCHER_P(Expr, ignoringParenImpCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreParenImpCasts(), Finder, Builder); } /// Matches types that match InnerMatcher after any parens are stripped. /// /// Given /// \code /// void (*fp)(void); /// \endcode /// The matcher /// \code /// varDecl(hasType(pointerType(pointee(ignoringParens(functionType()))))) /// \endcode /// would match the declaration for fp. AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>, InnerMatcher, 0) { return InnerMatcher.matches(Node.IgnoreParens(), Finder, Builder); } /// Overload \c ignoringParens for \c Expr. 
/// /// Given /// \code /// const char* str = ("my-string"); /// \endcode /// The matcher /// \code /// implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral()))) /// \endcode /// would match the implicit cast resulting from the assignment. AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>, InnerMatcher, 1) { const Expr *E = Node.IgnoreParens(); return InnerMatcher.matches(*E, Finder, Builder); } /// Matches expressions that are instantiation-dependent even if it is /// neither type- nor value-dependent. /// /// In the following example, the expression sizeof(sizeof(T() + T())) /// is instantiation-dependent (since it involves a template parameter T), /// but is neither type- nor value-dependent, since the type of the inner /// sizeof is known (std::size_t) and therefore the size of the outer /// sizeof is known. /// \code /// template<typename T> /// void f(T x, T y) { sizeof(sizeof(T() + T()); } /// \endcode /// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T()) AST_MATCHER(Expr, isInstantiationDependent) { return Node.isInstantiationDependent(); } /// Matches expressions that are type-dependent because the template type /// is not yet instantiated. /// /// For example, the expressions "x" and "x + y" are type-dependent in /// the following code, but "y" is not type-dependent: /// \code /// template<typename T> /// void add(T x, int y) { /// x + y; /// } /// \endcode /// expr(isTypeDependent()) matches x + y AST_MATCHER(Expr, isTypeDependent) { return Node.isTypeDependent(); } /// Matches expression that are value-dependent because they contain a /// non-type template parameter. /// /// For example, the array bound of "Chars" in the following example is /// value-dependent. 
/// \code /// template<int Size> int f() { return Size; } /// \endcode /// expr(isValueDependent()) matches return Size AST_MATCHER(Expr, isValueDependent) { return Node.isValueDependent(); } /// Matches classTemplateSpecializations, templateSpecializationType and /// functionDecl where the n'th TemplateArgument matches the given InnerMatcher. /// /// Given /// \code /// template<typename T, typename U> class A {}; /// A<bool, int> b; /// A<int, bool> c; /// /// template<typename T> void f() {} /// void func() { f<int>(); }; /// \endcode /// classTemplateSpecializationDecl(hasTemplateArgument( /// 1, refersToType(asString("int")))) /// matches the specialization \c A<bool, int> /// /// functionDecl(hasTemplateArgument(0, refersToType(asString("int")))) /// matches the specialization \c f<int> AST_POLYMORPHIC_MATCHER_P2( hasTemplateArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType, FunctionDecl), unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) { ArrayRef<TemplateArgument> List = internal::getTemplateSpecializationArgs(Node); if (List.size() <= N) return false; return InnerMatcher.matches(List[N], Finder, Builder); } /// Matches if the number of template arguments equals \p N. /// /// Given /// \code /// template<typename T> struct C {}; /// C<int> c; /// \endcode /// classTemplateSpecializationDecl(templateArgumentCountIs(1)) /// matches C<int>. AST_POLYMORPHIC_MATCHER_P( templateArgumentCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType), unsigned, N) { return internal::getTemplateSpecializationArgs(Node).size() == N; } /// Matches a TemplateArgument that refers to a certain type. 
/// /// Given /// \code /// struct X {}; /// template<typename T> struct A {}; /// A<X> a; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToType(class(hasName("X"))))) /// matches the specialization \c A<X> AST_MATCHER_P(TemplateArgument, refersToType, internal::Matcher<QualType>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Type) return false; return InnerMatcher.matches(Node.getAsType(), Finder, Builder); } /// Matches a TemplateArgument that refers to a certain template. /// /// Given /// \code /// template<template <typename> class S> class X {}; /// template<typename T> class Y {}; /// X<Y> xi; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToTemplate(templateName()))) /// matches the specialization \c X<Y> AST_MATCHER_P(TemplateArgument, refersToTemplate, internal::Matcher<TemplateName>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Template) return false; return InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder); } /// Matches a canonical TemplateArgument that refers to a certain /// declaration. /// /// Given /// \code /// struct B { int next; }; /// template<int(B::*next_ptr)> struct A {}; /// A<&B::next> a; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToDeclaration(fieldDecl(hasName("next"))))) /// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching /// \c B::next AST_MATCHER_P(TemplateArgument, refersToDeclaration, internal::Matcher<Decl>, InnerMatcher) { if (Node.getKind() == TemplateArgument::Declaration) return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder); return false; } /// Matches a sugar TemplateArgument that refers to a certain expression. 
/// /// Given /// \code /// struct B { int next; }; /// template<int(B::*next_ptr)> struct A {}; /// A<&B::next> a; /// \endcode /// templateSpecializationType(hasAnyTemplateArgument( /// isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next")))))))) /// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching /// \c B::next AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) { if (Node.getKind() == TemplateArgument::Expression) return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder); return false; } /// Matches a TemplateArgument that is an integral value. /// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(isIntegral())) /// matches the implicit instantiation of C in C<42> /// with isIntegral() matching 42. AST_MATCHER(TemplateArgument, isIntegral) { return Node.getKind() == TemplateArgument::Integral; } /// Matches a TemplateArgument that referes to an integral type. /// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(refersToIntegralType(asString("int")))) /// matches the implicit instantiation of C in C<42>. AST_MATCHER_P(TemplateArgument, refersToIntegralType, internal::Matcher<QualType>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Integral) return false; return InnerMatcher.matches(Node.getIntegralType(), Finder, Builder); } /// Matches a TemplateArgument of integral type with a given value. /// /// Note that 'Value' is a string as the template argument's value is /// an arbitrary precision integer. 'Value' must be euqal to the canonical /// representation of that integral value in base 10. 
/// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(equalsIntegralValue("42"))) /// matches the implicit instantiation of C in C<42>. AST_MATCHER_P(TemplateArgument, equalsIntegralValue, std::string, Value) { if (Node.getKind() != TemplateArgument::Integral) return false; return Node.getAsIntegral().toString(10) == Value; } /// Matches an Objective-C autorelease pool statement. /// /// Given /// \code /// @autoreleasepool { /// int x = 0; /// } /// \endcode /// autoreleasePoolStmt(stmt()) matches the declaration of "x" /// inside the autorelease pool. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAutoreleasePoolStmt> autoreleasePoolStmt; /// Matches any value declaration. /// /// Example matches A, B, C and F /// \code /// enum X { A, B, C }; /// void F(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl; /// Matches C++ constructor declarations. /// /// Example matches Foo::Foo() and Foo::Foo(int) /// \code /// class Foo { /// public: /// Foo(); /// Foo(int); /// int DoSomething(); /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl> cxxConstructorDecl; /// Matches explicit C++ destructor declarations. /// /// Example matches Foo::~Foo() /// \code /// class Foo { /// public: /// virtual ~Foo(); /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl> cxxDestructorDecl; /// Matches enum declarations. /// /// Example matches X /// \code /// enum X { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl; /// Matches enum constants. /// /// Example matches A, B, C /// \code /// enum X { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl> enumConstantDecl; /// Matches tag declarations. 
/// /// Example matches X, Z, U, S, E /// \code /// class X; /// template<class T> class Z {}; /// struct S {}; /// union U {}; /// enum E { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, TagDecl> tagDecl; /// Matches method declarations. /// /// Example matches y /// \code /// class X { void y(); }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl> cxxMethodDecl; /// Matches conversion operator declarations. /// /// Example matches the operator. /// \code /// class X { operator int() const; }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl> cxxConversionDecl; /// Matches user-defined and implicitly generated deduction guide. /// /// Example matches the deduction guide. /// \code /// template<typename T> /// class X { X(int) }; /// X(int) -> X<int>; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl> cxxDeductionGuideDecl; /// Matches variable declarations. /// /// Note: this does not match declarations of member variables, which are /// "field" declarations in Clang parlance. /// /// Example matches a /// \code /// int a; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl; /// Matches field declarations. /// /// Given /// \code /// class X { int m; }; /// \endcode /// fieldDecl() /// matches 'm'. extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl; /// Matches indirect field declarations. /// /// Given /// \code /// struct X { struct { int a; }; }; /// \endcode /// indirectFieldDecl() /// matches 'a'. extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl> indirectFieldDecl; /// Matches function declarations. /// /// Example matches f /// \code /// void f(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl> functionDecl; /// Matches C++ function template declarations. 
/// /// Example matches f /// \code /// template<class T> void f(T t) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl> functionTemplateDecl; /// Matches friend declarations. /// /// Given /// \code /// class X { friend void foo(); }; /// \endcode /// friendDecl() /// matches 'friend void foo()'. extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl; /// Matches statements. /// /// Given /// \code /// { ++a; } /// \endcode /// stmt() /// matches both the compound statement '{ ++a; }' and '++a'. extern const internal::VariadicAllOfMatcher<Stmt> stmt; /// Matches declaration statements. /// /// Given /// \code /// int a; /// \endcode /// declStmt() /// matches 'int a'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt; /// Matches member expressions. /// /// Given /// \code /// class Y { /// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; } /// int a; static int b; /// }; /// \endcode /// memberExpr() /// matches this->x, x, y.x, a, this->b extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr; /// Matches unresolved member expressions. /// /// Given /// \code /// struct X { /// template <class T> void f(); /// void g(); /// }; /// template <class T> void h() { X x; x.f<T>(); x.g(); } /// \endcode /// unresolvedMemberExpr() /// matches x.f<T> extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr> unresolvedMemberExpr; /// Matches member expressions where the actual member referenced could not be /// resolved because the base expression or the member name was dependent. /// /// Given /// \code /// template <class T> void f() { T t; t.g(); } /// \endcode /// cxxDependentScopeMemberExpr() /// matches t.g extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDependentScopeMemberExpr> cxxDependentScopeMemberExpr; /// Matches call expressions. 
/// /// Example matches x.y() and y() /// \code /// X x; /// x.y(); /// y(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr; /// Matches call expressions which were resolved using ADL. /// /// Example matches y(x) but not y(42) or NS::y(x). /// \code /// namespace NS { /// struct X {}; /// void y(X); /// } /// /// void y(...); /// /// void test() { /// NS::X x; /// y(x); // Matches /// NS::y(x); // Doesn't match /// y(42); // Doesn't match /// using NS::y; /// y(x); // Found by both unqualified lookup and ADL, doesn't match // } /// \endcode AST_MATCHER(CallExpr, usesADL) { return Node.usesADL(); } /// Matches lambda expressions. /// /// Example matches [&](){return 5;} /// \code /// [&](){return 5;} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr; /// Matches member call expressions. /// /// Example matches x.y() /// \code /// X x; /// x.y(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr> cxxMemberCallExpr; /// Matches ObjectiveC Message invocation expressions. /// /// The innermost message send invokes the "alloc" class method on the /// NSString class, while the outermost message send invokes the /// "initWithString" instance method on the object returned from /// NSString's "alloc". This matcher should match both message sends. /// \code /// [[NSString alloc] initWithString:@"Hello"] /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr> objcMessageExpr; /// Matches Objective-C interface declarations. /// /// Example matches Foo /// \code /// @interface Foo /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl> objcInterfaceDecl; /// Matches Objective-C implementation declarations. 
/// /// Example matches Foo /// \code /// @implementation Foo /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl> objcImplementationDecl; /// Matches Objective-C protocol declarations. /// /// Example matches FooDelegate /// \code /// @protocol FooDelegate /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl> objcProtocolDecl; /// Matches Objective-C category declarations. /// /// Example matches Foo (Additions) /// \code /// @interface Foo (Additions) /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl> objcCategoryDecl; /// Matches Objective-C category definitions. /// /// Example matches Foo (Additions) /// \code /// @implementation Foo (Additions) /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl> objcCategoryImplDecl; /// Matches Objective-C method declarations. /// /// Example matches both declaration and definition of -[Foo method] /// \code /// @interface Foo /// - (void)method; /// @end /// /// @implementation Foo /// - (void)method {} /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl> objcMethodDecl; /// Matches block declarations. /// /// Example matches the declaration of the nameless block printing an input /// integer. /// /// \code /// myFunc(^(int p) { /// printf("%d", p); /// }) /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl> blockDecl; /// Matches Objective-C instance variable declarations. /// /// Example matches _enabled /// \code /// @implementation Foo { /// BOOL _enabled; /// } /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl> objcIvarDecl; /// Matches Objective-C property declarations. 
/// /// Example matches enabled /// \code /// @interface Foo /// @property BOOL enabled; /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl> objcPropertyDecl; /// Matches Objective-C \@throw statements. /// /// Example matches \@throw /// \code /// @throw obj; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt> objcThrowStmt; /// Matches Objective-C @try statements. /// /// Example matches @try /// \code /// @try {} /// @catch (...) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt> objcTryStmt; /// Matches Objective-C @catch statements. /// /// Example matches @catch /// \code /// @try {} /// @catch (...) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt> objcCatchStmt; /// Matches Objective-C @finally statements. /// /// Example matches @finally /// \code /// @try {} /// @finally {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt> objcFinallyStmt; /// Matches expressions that introduce cleanups to be run at the end /// of the sub-expression's evaluation. /// /// Example matches std::string() /// \code /// const std::string str = std::string(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups> exprWithCleanups; /// Matches init list expressions. /// /// Given /// \code /// int a[] = { 1, 2 }; /// struct B { int x, y; }; /// B b = { 5, 6 }; /// \endcode /// initListExpr() /// matches "{ 1, 2 }" and "{ 5, 6 }" extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr> initListExpr; /// Matches the syntactic form of init list expressions /// (if expression have it). AST_MATCHER_P(InitListExpr, hasSyntacticForm, internal::Matcher<Expr>, InnerMatcher) { const Expr *SyntForm = Node.getSyntacticForm(); return (SyntForm != nullptr && InnerMatcher.matches(*SyntForm, Finder, Builder)); } /// Matches C++ initializer list expressions. 
/// /// Given /// \code /// std::vector<int> a({ 1, 2, 3 }); /// std::vector<int> b = { 4, 5 }; /// int c[] = { 6, 7 }; /// std::pair<int, int> d = { 8, 9 }; /// \endcode /// cxxStdInitializerListExpr() /// matches "{ 1, 2, 3 }" and "{ 4, 5 }" extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStdInitializerListExpr> cxxStdInitializerListExpr; /// Matches implicit initializers of init list expressions. /// /// Given /// \code /// point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 }; /// \endcode /// implicitValueInitExpr() /// matches "[0].y" (implicitly) extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr> implicitValueInitExpr; /// Matches paren list expressions. /// ParenListExprs don't have a predefined type and are used for late parsing. /// In the final AST, they can be met in template declarations. /// /// Given /// \code /// template<typename T> class X { /// void f() { /// X x(*this); /// int a = 0, b = 1; int i = (a, b); /// } /// }; /// \endcode /// parenListExpr() matches "*this" but NOT matches (a, b) because (a, b) /// has a predefined type and is a ParenExpr, not a ParenListExpr. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr> parenListExpr; /// Matches substitutions of non-type template parameters. /// /// Given /// \code /// template <int N> /// struct A { static const int n = N; }; /// struct B : public A<42> {}; /// \endcode /// substNonTypeTemplateParmExpr() /// matches "N" in the right-hand side of "static const int n = N;" extern const internal::VariadicDynCastAllOfMatcher<Stmt, SubstNonTypeTemplateParmExpr> substNonTypeTemplateParmExpr; /// Matches using declarations. /// /// Given /// \code /// namespace X { int x; } /// using X::x; /// \endcode /// usingDecl() /// matches \code using X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl; /// Matches using namespace declarations. 
/// /// Given /// \code /// namespace X { int x; } /// using namespace X; /// \endcode /// usingDirectiveDecl() /// matches \code using namespace X \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl> usingDirectiveDecl; /// Matches reference to a name that can be looked up during parsing /// but could not be resolved to a specific declaration. /// /// Given /// \code /// template<typename T> /// T foo() { T a; return a; } /// template<typename T> /// void bar() { /// foo<T>(); /// } /// \endcode /// unresolvedLookupExpr() /// matches \code foo<T>() \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr> unresolvedLookupExpr; /// Matches unresolved using value declarations. /// /// Given /// \code /// template<typename X> /// class C : private X { /// using X::x; /// }; /// \endcode /// unresolvedUsingValueDecl() /// matches \code using X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingValueDecl> unresolvedUsingValueDecl; /// Matches unresolved using value declarations that involve the /// typename. /// /// Given /// \code /// template <typename T> /// struct Base { typedef T Foo; }; /// /// template<typename T> /// struct S : private Base<T> { /// using typename Base<T>::Foo; /// }; /// \endcode /// unresolvedUsingTypenameDecl() /// matches \code using Base<T>::Foo \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingTypenameDecl> unresolvedUsingTypenameDecl; /// Matches a constant expression wrapper. /// /// Example matches the constant in the case statement: /// (matcher = constantExpr()) /// \code /// switch (a) { /// case 37: break; /// } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr> constantExpr; /// Matches parentheses used in expressions. 
/// /// Example matches (foo() + 1) /// \code /// int foo() { return 1; } /// int a = (foo() + 1); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr; /// Matches constructor call expressions (including implicit ones). /// /// Example matches string(ptr, n) and ptr within arguments of f /// (matcher = cxxConstructExpr()) /// \code /// void f(const string &a, const string &b); /// char *ptr; /// int n; /// f(string(ptr, n), ptr); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr> cxxConstructExpr; /// Matches unresolved constructor call expressions. /// /// Example matches T(t) in return statement of f /// (matcher = cxxUnresolvedConstructExpr()) /// \code /// template <typename T> /// void f(const T& t) { return T(t); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXUnresolvedConstructExpr> cxxUnresolvedConstructExpr; /// Matches implicit and explicit this expressions. /// /// Example matches the implicit this expression in "return i". /// (matcher = cxxThisExpr()) /// \code /// struct foo { /// int i; /// int f() { return i; } /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr> cxxThisExpr; /// Matches nodes where temporaries are created. /// /// Example matches FunctionTakesString(GetStringByValue()) /// (matcher = cxxBindTemporaryExpr()) /// \code /// FunctionTakesString(GetStringByValue()); /// FunctionTakesStringByPointer(GetStringPointer()); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr> cxxBindTemporaryExpr; /// Matches nodes where temporaries are materialized. 
/// /// Example: Given /// \code /// struct T {void func();}; /// T f(); /// void g(T); /// \endcode /// materializeTemporaryExpr() matches 'f()' in these statements /// \code /// T u(f()); /// g(f()); /// f().func(); /// \endcode /// but does not match /// \code /// f(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, MaterializeTemporaryExpr> materializeTemporaryExpr; /// Matches new expressions. /// /// Given /// \code /// new X; /// \endcode /// cxxNewExpr() /// matches 'new X'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr; /// Matches delete expressions. /// /// Given /// \code /// delete X; /// \endcode /// cxxDeleteExpr() /// matches 'delete X'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr> cxxDeleteExpr; /// Matches noexcept expressions. /// /// Given /// \code /// bool a() noexcept; /// bool b() noexcept(true); /// bool c() noexcept(false); /// bool d() noexcept(noexcept(a())); /// bool e = noexcept(b()) || noexcept(c()); /// \endcode /// cxxNoexceptExpr() /// matches `noexcept(a())`, `noexcept(b())` and `noexcept(c())`. /// doesn't match the noexcept specifier in the declarations a, b, c or d. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr> cxxNoexceptExpr; /// Matches array subscript expressions. /// /// Given /// \code /// int i = a[1]; /// \endcode /// arraySubscriptExpr() /// matches "a[1]" extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr> arraySubscriptExpr; /// Matches the value of a default argument at the call site. /// /// Example matches the CXXDefaultArgExpr placeholder inserted for the /// default value of the second parameter in the call expression f(42) /// (matcher = cxxDefaultArgExpr()) /// \code /// void f(int x, int y = 0); /// f(42); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr> cxxDefaultArgExpr; /// Matches overloaded operator calls. 
/// /// Note that if an operator isn't overloaded, it won't match. Instead, use /// binaryOperator matcher. /// Currently it does not match operators such as new delete. /// FIXME: figure out why these do not match? /// /// Example matches both operator<<((o << b), c) and operator<<(o, b) /// (matcher = cxxOperatorCallExpr()) /// \code /// ostream &operator<< (ostream &out, int i) { }; /// ostream &o; int b = 1, c = 1; /// o << b << c; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr> cxxOperatorCallExpr; /// Matches expressions. /// /// Example matches x() /// \code /// void f() { x(); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr; /// Matches expressions that refer to declarations. /// /// Example matches x in if (x) /// \code /// bool x; /// if (x) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr> declRefExpr; /// Matches a reference to an ObjCIvar. /// /// Example: matches "a" in "init" method: /// \code /// @implementation A { /// NSString *a; /// } /// - (void) init { /// a = @"hello"; /// } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr> objcIvarRefExpr; /// Matches a reference to a block. /// /// Example: matches "^{}": /// \code /// void f() { ^{}(); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr; /// Matches if statements. /// /// Example matches 'if (x) {}' /// \code /// if (x) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt; /// Matches for statements. /// /// Example matches 'for (;;) {}' /// \code /// for (;;) {} /// int i[] = {1, 2, 3}; for (auto a : i); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt; /// Matches the increment statement of a for loop. 
/// /// Example: /// forStmt(hasIncrement(unaryOperator(hasOperatorName("++")))) /// matches '++x' in /// \code /// for (x; x < N; ++x) { } /// \endcode AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Increment = Node.getInc(); return (Increment != nullptr && InnerMatcher.matches(*Increment, Finder, Builder)); } /// Matches the initialization statement of a for loop. /// /// Example: /// forStmt(hasLoopInit(declStmt())) /// matches 'int x = 0' in /// \code /// for (int x = 0; x < N; ++x) { } /// \endcode AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Init = Node.getInit(); return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder)); } /// Matches range-based for statements. /// /// cxxForRangeStmt() matches 'for (auto a : i)' /// \code /// int i[] = {1, 2, 3}; for (auto a : i); /// for(int j = 0; j < 5; ++j); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt> cxxForRangeStmt; /// Matches the initialization statement of a for loop. /// /// Example: /// forStmt(hasLoopVariable(anything())) /// matches 'int x' in /// \code /// for (int x : a) { } /// \endcode AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>, InnerMatcher) { const VarDecl *const Var = Node.getLoopVariable(); return (Var != nullptr && InnerMatcher.matches(*Var, Finder, Builder)); } /// Matches the range initialization statement of a for loop. /// /// Example: /// forStmt(hasRangeInit(anything())) /// matches 'a' in /// \code /// for (int x : a) { } /// \endcode AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>, InnerMatcher) { const Expr *const Init = Node.getRangeInit(); return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder)); } /// Matches while statements. /// /// Given /// \code /// while (true) {} /// \endcode /// whileStmt() /// matches 'while (true) {}'. 
extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt; /// Matches do statements. /// /// Given /// \code /// do {} while (true); /// \endcode /// doStmt() /// matches 'do {} while(true)' extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt; /// Matches break statements. /// /// Given /// \code /// while (true) { break; } /// \endcode /// breakStmt() /// matches 'break' extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt; /// Matches continue statements. /// /// Given /// \code /// while (true) { continue; } /// \endcode /// continueStmt() /// matches 'continue' extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt> continueStmt; /// Matches return statements. /// /// Given /// \code /// return 1; /// \endcode /// returnStmt() /// matches 'return 1' extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt; /// Matches goto statements. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// gotoStmt() /// matches 'goto FOO' extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt; /// Matches label statements. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// labelStmt() /// matches 'FOO:' extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt; /// Matches address of label statements (GNU extension). /// /// Given /// \code /// FOO: bar(); /// void *ptr = &&FOO; /// goto *bar; /// \endcode /// addrLabelExpr() /// matches '&&FOO' extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr> addrLabelExpr; /// Matches switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// switchStmt() /// matches 'switch(a)'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt; /// Matches case and default statements inside switch statements. 
/// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// switchCase() /// matches 'case 42:' and 'default:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase; /// Matches case statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// caseStmt() /// matches 'case 42:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt; /// Matches default statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// defaultStmt() /// matches 'default:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt> defaultStmt; /// Matches compound statements. /// /// Example matches '{}' and '{{}}' in 'for (;;) {{}}' /// \code /// for (;;) {{}} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt> compoundStmt; /// Matches catch statements. /// /// \code /// try {} catch(int i) {} /// \endcode /// cxxCatchStmt() /// matches 'catch(int i)' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt> cxxCatchStmt; /// Matches try statements. /// /// \code /// try {} catch(int i) {} /// \endcode /// cxxTryStmt() /// matches 'try {}' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt; /// Matches throw expressions. /// /// \code /// try { throw 5; } catch(int i) {} /// \endcode /// cxxThrowExpr() /// matches 'throw 5' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr> cxxThrowExpr; /// Matches null statements. /// /// \code /// foo();; /// \endcode /// nullStmt() /// matches the second ';' extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt; /// Matches asm statements. 
/// /// \code /// int i = 100; /// __asm("mov al, 2"); /// \endcode /// asmStmt() /// matches '__asm("mov al, 2")' extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt; /// Matches bool literals. /// /// Example matches true /// \code /// true /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr> cxxBoolLiteral; /// Matches string literals (also matches wide string literals). /// /// Example matches "abcd", L"abcd" /// \code /// char *s = "abcd"; /// wchar_t *ws = L"abcd"; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral> stringLiteral; /// Matches character literals (also matches wchar_t). /// /// Not matching Hex-encoded chars (e.g. 0x1234, which is a IntegerLiteral), /// though. /// /// Example matches 'a', L'a' /// \code /// char ch = 'a'; /// wchar_t chw = L'a'; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral> characterLiteral; /// Matches integer literals of all sizes / encodings, e.g. /// 1, 1L, 0x1 and 1U. /// /// Does not match character-encoded integers such as L'a'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral> integerLiteral; /// Matches float literals of all sizes / encodings, e.g. /// 1.0, 1.0f, 1.0L and 1e10. /// /// Does not match implicit conversions such as /// \code /// float a = 10; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral> floatLiteral; /// Matches imaginary literals, which are based on integer and floating /// point literals e.g.: 1i, 1.0i extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral> imaginaryLiteral; /// Matches fixed point literals extern const internal::VariadicDynCastAllOfMatcher<Stmt, FixedPointLiteral> fixedPointLiteral; /// Matches user defined literal operator call. 
/// /// Example match: "foo"_suffix extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral> userDefinedLiteral; /// Matches compound (i.e. non-scalar) literals /// /// Example match: {1}, (1, 2) /// \code /// int array[4] = {1}; /// vector int myvec = (vector int)(1, 2); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr> compoundLiteralExpr; /// Matches nullptr literal. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr> cxxNullPtrLiteralExpr; /// Matches GNU __builtin_choose_expr. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr> chooseExpr; /// Matches GNU __null expression. extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr> gnuNullExpr; /// Matches atomic builtins. /// Example matches __atomic_load_n(ptr, 1) /// \code /// void foo() { int *ptr; __atomic_load_n(ptr, 1); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr; /// Matches statement expression (GNU extension). /// /// Example match: ({ int X = 4; X; }) /// \code /// int C = ({ int X = 4; X; }); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr; /// Matches binary operator expressions. /// /// Example matches a || b /// \code /// !(a || b) /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator> binaryOperator; /// Matches unary operator expressions. /// /// Example matches !a /// \code /// !a || b /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator> unaryOperator; /// Matches conditional operator expressions. /// /// Example matches a ? b : c /// \code /// (a ? b : c) + 42 /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator> conditionalOperator; /// Matches binary conditional operator expressions (GNU extension). 
/// /// Example matches a ?: b /// \code /// (a ?: b) + 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryConditionalOperator> binaryConditionalOperator; /// Matches opaque value expressions. They are used as helpers /// to reference another expressions and can be met /// in BinaryConditionalOperators, for example. /// /// Example matches 'a' /// \code /// (a ?: c) + 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr> opaqueValueExpr; /// Matches a C++ static_assert declaration. /// /// Example: /// staticAssertExpr() /// matches /// static_assert(sizeof(S) == sizeof(int)) /// in /// \code /// struct S { /// int x; /// }; /// static_assert(sizeof(S) == sizeof(int)); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl> staticAssertDecl; /// Matches a reinterpret_cast expression. /// /// Either the source expression or the destination type can be matched /// using has(), but hasDestinationType() is more specific and can be /// more readable. /// /// Example matches reinterpret_cast<char*>(&p) in /// \code /// void* p = reinterpret_cast<char*>(&p); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr> cxxReinterpretCastExpr; /// Matches a C++ static_cast expression. /// /// \see hasDestinationType /// \see reinterpretCast /// /// Example: /// cxxStaticCastExpr() /// matches /// static_cast<long>(8) /// in /// \code /// long eight(static_cast<long>(8)); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr> cxxStaticCastExpr; /// Matches a dynamic_cast expression. /// /// Example: /// cxxDynamicCastExpr() /// matches /// dynamic_cast<D*>(&b); /// in /// \code /// struct B { virtual ~B() {} }; struct D : B {}; /// B b; /// D* p = dynamic_cast<D*>(&b); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr> cxxDynamicCastExpr; /// Matches a const_cast expression. 
/// /// Example: Matches const_cast<int*>(&r) in /// \code /// int n = 42; /// const int &r(n); /// int* p = const_cast<int*>(&r); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr> cxxConstCastExpr; /// Matches a C-style cast expression. /// /// Example: Matches (int) 2.2f in /// \code /// int i = (int) 2.2f; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr> cStyleCastExpr; /// Matches explicit cast expressions. /// /// Matches any cast expression written in user code, whether it be a /// C-style cast, a functional-style cast, or a keyword cast. /// /// Does not match implicit conversions. /// /// Note: the name "explicitCast" is chosen to match Clang's terminology, as /// Clang uses the term "cast" to apply to implicit conversions as well as to /// actual cast expressions. /// /// \see hasDestinationType. /// /// Example: matches all five of the casts in /// \code /// int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42))))) /// \endcode /// but does not match the implicit conversion in /// \code /// long ell = 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr> explicitCastExpr; /// Matches the implicit cast nodes of Clang's AST. /// /// This matches many different places, including function call return value /// eliding, as well as any type conversions. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr> implicitCastExpr; /// Matches any cast nodes of Clang's AST. 
/// /// Example: castExpr() matches each of the following: /// \code /// (int) 3; /// const_cast<Expr *>(SubExpr); /// char c = 0; /// \endcode /// but does not match /// \code /// int i = (0); /// int k = 0; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr; /// Matches functional cast expressions /// /// Example: Matches Foo(bar); /// \code /// Foo f = bar; /// Foo g = (Foo) bar; /// Foo h = Foo(bar); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr> cxxFunctionalCastExpr; /// Matches functional cast expressions having N != 1 arguments /// /// Example: Matches Foo(bar, bar) /// \code /// Foo h = Foo(bar, bar); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr> cxxTemporaryObjectExpr; /// Matches predefined identifier expressions [C99 6.4.2.2]. /// /// Example: Matches __func__ /// \code /// printf("%s", __func__); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr> predefinedExpr; /// Matches C99 designated initializer expressions [C99 6.7.8]. /// /// Example: Matches { [2].y = 1.0, [0].x = 1.0 } /// \code /// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr> designatedInitExpr; /// Matches designated initializer expressions that contain /// a specific number of designators. /// /// Example: Given /// \code /// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 }; /// point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }; /// \endcode /// designatorCountIs(2) /// matches '{ [2].y = 1.0, [0].x = 1.0 }', /// but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'. AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) { return Node.size() == N; } /// Matches \c QualTypes in the clang AST. extern const internal::VariadicAllOfMatcher<QualType> qualType; /// Matches \c Types in the clang AST. 
extern const internal::VariadicAllOfMatcher<Type> type; /// Matches \c TypeLocs in the clang AST. extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc; /// Matches if any of the given matchers matches. /// /// Unlike \c anyOf, \c eachOf will generate a match result for each /// matching submatcher. /// /// For example, in: /// \code /// class A { int a; int b; }; /// \endcode /// The matcher: /// \code /// cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")), /// has(fieldDecl(hasName("b")).bind("v")))) /// \endcode /// will generate two results binding "v", the first of which binds /// the field declaration of \c a, the second the field declaration of /// \c b. /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> eachOf; /// Matches if any of the given matchers matches. /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> anyOf; /// Matches if all given matchers match. /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> allOf; /// Matches any node regardless of the submatcher. /// /// However, \c optionally will retain any bindings generated by the submatcher. /// Useful when additional information which may or may not present about a main /// matching node is desired. /// /// For example, in: /// \code /// class Foo { /// int bar; /// } /// \endcode /// The matcher: /// \code /// cxxRecordDecl( /// optionally(has( /// fieldDecl(hasName("bar")).bind("var") /// ))).bind("record") /// \endcode /// will produce a result binding for both "record" and "var". /// The matcher will produce a "record" binding for even if there is no data /// member named "bar" in that class. 
/// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc<1, 1> optionally; /// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL) /// /// Given /// \code /// Foo x = bar; /// int y = sizeof(x) + alignof(x); /// \endcode /// unaryExprOrTypeTraitExpr() /// matches \c sizeof(x) and \c alignof(x) extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryExprOrTypeTraitExpr> unaryExprOrTypeTraitExpr; /// Matches unary expressions that have a specific type of argument. /// /// Given /// \code /// int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c); /// \endcode /// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")) /// matches \c sizeof(a) and \c alignof(c) AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType, internal::Matcher<QualType>, InnerMatcher) { const QualType ArgumentType = Node.getTypeOfArgument(); return InnerMatcher.matches(ArgumentType, Finder, Builder); } /// Matches unary expressions of a certain kind. /// /// Given /// \code /// int x; /// int s = sizeof(x) + alignof(x) /// \endcode /// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf)) /// matches \c sizeof(x) /// /// If the matcher is use from clang-query, UnaryExprOrTypeTrait parameter /// should be passed as a quoted string. e.g., ofKind("UETT_SizeOf"). AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) { return Node.getKind() == Kind; } /// Same as unaryExprOrTypeTraitExpr, but only matching /// alignof. inline internal::BindableMatcher<Stmt> alignOfExpr( const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) { return stmt(unaryExprOrTypeTraitExpr( allOf(anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf)), InnerMatcher))); } /// Same as unaryExprOrTypeTraitExpr, but only matching /// sizeof. 
inline internal::BindableMatcher<Stmt> sizeOfExpr( const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) { return stmt(unaryExprOrTypeTraitExpr( allOf(ofKind(UETT_SizeOf), InnerMatcher))); } /// Matches NamedDecl nodes that have the specified name. /// /// Supports specifying enclosing namespaces or classes by prefixing the name /// with '<enclosing>::'. /// Does not match typedefs of an underlying type with the given name. /// /// Example matches X (Name == "X") /// \code /// class X; /// \endcode /// /// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X") /// \code /// namespace a { namespace b { class X; } } /// \endcode inline internal::Matcher<NamedDecl> hasName(StringRef Name) { return internal::Matcher<NamedDecl>( new internal::HasNameMatcher({std::string(Name)})); } /// Matches NamedDecl nodes that have any of the specified names. /// /// This matcher is only provided as a performance optimization of hasName. /// \code /// hasAnyName(a, b, c) /// \endcode /// is equivalent to, but faster than /// \code /// anyOf(hasName(a), hasName(b), hasName(c)) /// \endcode extern const internal::VariadicFunction<internal::Matcher<NamedDecl>, StringRef, internal::hasAnyNameFunc> hasAnyName; /// Matches NamedDecl nodes whose fully qualified names contain /// a substring matched by the given RegExp. /// /// Supports specifying enclosing namespaces or classes by /// prefixing the name with '<enclosing>::'. Does not match typedefs /// of an underlying type with the given name. 
/// /// Example matches X (regexp == "::X") /// \code /// class X; /// \endcode /// /// Example matches X (regexp is one of "::X", "^foo::.*X", among others) /// \code /// namespace foo { namespace bar { class X; } } /// \endcode AST_MATCHER_P(NamedDecl, matchesName, std::string, RegExp) { assert(!RegExp.empty()); std::string FullNameString = "::" + Node.getQualifiedNameAsString(); llvm::Regex RE(RegExp); return RE.match(FullNameString); } /// Matches overloaded operator names. /// /// Matches overloaded operator names specified in strings without the /// "operator" prefix: e.g. "<<". /// /// Given: /// \code /// class A { int operator*(); }; /// const A &operator<<(const A &a, const A &b); /// A a; /// a << a; // <-- This matches /// \endcode /// /// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<"))) matches the /// specified line and /// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*"))) /// matches the declaration of \c A. /// /// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl> inline internal::PolymorphicMatcherWithParam1< internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)> hasOverloadedOperatorName(StringRef Name) { return internal::PolymorphicMatcherWithParam1< internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>( {std::string(Name)}); } /// Matches overloaded operator names. /// /// Matches overloaded operator names specified in strings without the /// "operator" prefix: e.g. "<<". 
hasAnyOverloadedOperatorName
const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder, /*Directly=*/false); } /// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)). AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), std::string, BaseName, 1) { if (BaseName.empty()) return false; const auto M = isDerivedFrom(hasName(BaseName)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Matches C++ classes that have a direct or indirect base matching \p /// BaseSpecMatcher. /// /// Example: /// matcher hasAnyBase(hasType(cxxRecordDecl(hasName("SpecialBase"))))) /// \code /// class Foo; /// class Bar : Foo {}; /// class Baz : Bar {}; /// class SpecialBase; /// class Proxy : SpecialBase {}; // matches Proxy /// class IndirectlyDerived : Proxy {}; //matches IndirectlyDerived /// \endcode /// // FIXME: Refactor this and isDerivedFrom to reuse implementation. AST_MATCHER_P(CXXRecordDecl, hasAnyBase, internal::Matcher<CXXBaseSpecifier>, BaseSpecMatcher) { return internal::matchesAnyBase(Node, BaseSpecMatcher, Finder, Builder); } /// Similar to \c isDerivedFrom(), but also matches classes that directly /// match \c Base. 
AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isSameOrDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), internal::Matcher<NamedDecl>, Base, 0) { const auto M = anyOf(Base, isDerivedFrom(Base)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Overloaded method as shortcut for /// \c isSameOrDerivedFrom(hasName(...)). AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isSameOrDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), std::string, BaseName, 1) { if (BaseName.empty()) return false; const auto M = isSameOrDerivedFrom(hasName(BaseName)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Matches C++ or Objective-C classes that are directly derived from a class /// matching \c Base. /// /// Note that a class is not considered to be derived from itself. /// /// Example matches Y, C (Base == hasName("X")) /// \code /// class X; /// class Y : public X {}; // directly derived /// class Z : public Y {}; // indirectly derived /// typedef X A; /// typedef A B; /// class C : public B {}; // derived from a typedef of X /// \endcode /// /// In the following example, Bar matches isDerivedFrom(hasName("X")): /// \code /// class Foo; /// typedef Foo X; /// class Bar : public Foo {}; // derived from a type that X is a typedef of /// \endcode AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isDirectlyDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), internal::Matcher<NamedDecl>, Base, 0) { // Check if the node is a C++ struct/union/class. 
if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/true); // The node must be an Objective-C class. const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder, /*Directly=*/true); } /// Overloaded method as shortcut for \c isDirectlyDerivedFrom(hasName(...)). AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isDirectlyDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), std::string, BaseName, 1) { if (BaseName.empty()) return false; const auto M = isDirectlyDerivedFrom(hasName(BaseName)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Matches the first method of a class or struct that satisfies \c /// InnerMatcher. /// /// Given: /// \code /// class A { void func(); }; /// class B { void member(); }; /// \endcode /// /// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of /// \c A but not \c B. AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.method_begin(), Node.method_end(), Finder, Builder); } /// Matches the generated class of lambda expressions. /// /// Given: /// \code /// auto x = []{}; /// \endcode /// /// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of /// \c decltype(x) AST_MATCHER(CXXRecordDecl, isLambda) { return Node.isLambda(); } /// Matches AST nodes that have child AST nodes that match the /// provided matcher. /// /// Example matches X, Y /// (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X"))) /// \code /// class X {}; // Matches X, because X::X is a class of name X inside X. 
/// class Y { class X {}; }; /// class Z { class Y { class X {}; }; }; // Does not match Z. /// \endcode /// /// ChildT must be an AST base type. /// /// Usable as: Any Matcher /// Note that has is direct matcher, so it also matches things like implicit /// casts and paren casts. If you are matching with expr then you should /// probably consider using ignoringParenImpCasts like: /// has(ignoringParenImpCasts(expr())). extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has; /// Matches AST nodes that have descendant AST nodes that match the /// provided matcher. /// /// Example matches X, Y, Z /// (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X"))))) /// \code /// class X {}; // Matches X, because X::X is a class of name X inside X. /// class Y { class X {}; }; /// class Z { class Y { class X {}; }; }; /// \endcode /// /// DescendantT must be an AST base type. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasDescendantMatcher> hasDescendant; /// Matches AST nodes that have child AST nodes that match the /// provided matcher. /// /// Example matches X, Y, Y::X, Z::Y, Z::Y::X /// (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X"))) /// \code /// class X {}; /// class Y { class X {}; }; // Matches Y, because Y::X is a class of name X /// // inside Y. /// class Z { class Y { class X {}; }; }; // Does not match Z. /// \endcode /// /// ChildT must be an AST base type. /// /// As opposed to 'has', 'forEach' will cause a match for each result that /// matches instead of only on the first one. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher> forEach; /// Matches AST nodes that have descendant AST nodes that match the /// provided matcher. 
/// /// Example matches X, A, A::X, B, B::C, B::C::X /// (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X"))))) /// \code /// class X {}; /// class A { class X {}; }; // Matches A, because A::X is a class of name /// // X inside A. /// class B { class C { class X {}; }; }; /// \endcode /// /// DescendantT must be an AST base type. /// /// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for /// each result that matches instead of only on the first one. /// /// Note: Recursively combined ForEachDescendant can cause many matches: /// cxxRecordDecl(forEachDescendant(cxxRecordDecl( /// forEachDescendant(cxxRecordDecl()) /// ))) /// will match 10 times (plus injected class name matches) on: /// \code /// class A { class B { class C { class D { class E {}; }; }; }; }; /// \endcode /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::ForEachDescendantMatcher> forEachDescendant; /// Matches if the node or any descendant matches. /// /// Generates results for each match. /// /// For example, in: /// \code /// class A { class B {}; class C {}; }; /// \endcode /// The matcher: /// \code /// cxxRecordDecl(hasName("::A"), /// findAll(cxxRecordDecl(isDefinition()).bind("m"))) /// \endcode /// will generate results for \c A, \c B and \c C. /// /// Usable as: Any Matcher template <typename T> internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) { return eachOf(Matcher, forEachDescendant(Matcher)); } /// Matches AST nodes that have a parent that matches the provided /// matcher. /// /// Given /// \code /// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } } /// \endcode /// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }". 
/// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasParentMatcher, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>> hasParent; /// Matches AST nodes that have an ancestor that matches the provided /// matcher. /// /// Given /// \code /// void f() { if (true) { int x = 42; } } /// void g() { for (;;) { int x = 43; } } /// \endcode /// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasAncestorMatcher, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>> hasAncestor; /// Matches if the provided matcher does not match. /// /// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X")))) /// \code /// class X {}; /// class Y {}; /// \endcode /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc<1, 1> unless; /// Matches a node if the declaration associated with that node /// matches the given matcher. /// /// The associated declaration is: /// - for type nodes, the declaration of the underlying type /// - for CallExpr, the declaration of the callee /// - for MemberExpr, the declaration of the referenced member /// - for CXXConstructExpr, the declaration of the constructor /// - for CXXNewExpr, the declaration of the operator new /// - for ObjCIvarExpr, the declaration of the ivar /// /// For type nodes, hasDeclaration will generally match the declaration of the /// sugared type. Given /// \code /// class X {}; /// typedef X Y; /// Y y; /// \endcode /// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the /// typedefDecl. A common use case is to match the underlying, desugared type. 
/// This can be achieved by using the hasUnqualifiedDesugaredType matcher: /// \code /// varDecl(hasType(hasUnqualifiedDesugaredType( /// recordType(hasDeclaration(decl()))))) /// \endcode /// In this matcher, the decl will match the CXXRecordDecl of class X. /// /// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>, /// Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>, /// Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>, /// Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>, /// Matcher<TagType>, Matcher<TemplateSpecializationType>, /// Matcher<TemplateTypeParmType>, Matcher<TypedefType>, /// Matcher<UnresolvedUsingType> inline internal::PolymorphicMatcherWithParam1< internal::HasDeclarationMatcher, internal::Matcher<Decl>, void(internal::HasDeclarationSupportedTypes)> hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) { return internal::PolymorphicMatcherWithParam1< internal::HasDeclarationMatcher, internal::Matcher<Decl>, void(internal::HasDeclarationSupportedTypes)>(InnerMatcher); } /// Matches a \c NamedDecl whose underlying declaration matches the given /// matcher. /// /// Given /// \code /// namespace N { template<class T> void f(T t); } /// template <class T> void g() { using N::f; f(T()); } /// \endcode /// \c unresolvedLookupExpr(hasAnyDeclaration( /// namedDecl(hasUnderlyingDecl(hasName("::N::f"))))) /// matches the use of \c f in \c g() . AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>, InnerMatcher) { const NamedDecl *UnderlyingDecl = Node.getUnderlyingDecl(); return UnderlyingDecl != nullptr && InnerMatcher.matches(*UnderlyingDecl, Finder, Builder); } /// Matches on the implicit object argument of a member call expression, after /// stripping off any parentheses or implicit casts. 
/// /// Given /// \code /// class Y { public: void m(); }; /// Y g(); /// class X : public Y {}; /// void z(Y y, X x) { y.m(); (g()).m(); x.m(); } /// \endcode /// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y"))))) /// matches `y.m()` and `(g()).m()`. /// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X"))))) /// matches `x.m()`. /// cxxMemberCallExpr(on(callExpr())) /// matches `(g()).m()`. /// /// FIXME: Overload to allow directly matching types? AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>, InnerMatcher) { const Expr *ExprNode = Node.getImplicitObjectArgument() ->IgnoreParenImpCasts(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches on the receiver of an ObjectiveC Message expression. /// /// Example /// matcher = objCMessageExpr(hasReceiverType(asString("UIWebView *"))); /// matches the [webView ...] message invocation. /// \code /// NSString *webViewJavaScript = ... /// UIWebView *webView = ... /// [webView stringByEvaluatingJavaScriptFromString:webViewJavascript]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>, InnerMatcher) { const QualType TypeDecl = Node.getReceiverType(); return InnerMatcher.matches(TypeDecl, Finder, Builder); } /// Returns true when the Objective-C method declaration is a class method. /// /// Example /// matcher = objcMethodDecl(isClassMethod()) /// matches /// \code /// @interface I + (void)foo; @end /// \endcode /// but not /// \code /// @interface I - (void)bar; @end /// \endcode AST_MATCHER(ObjCMethodDecl, isClassMethod) { return Node.isClassMethod(); } /// Returns true when the Objective-C method declaration is an instance method. 
/// /// Example /// matcher = objcMethodDecl(isInstanceMethod()) /// matches /// \code /// @interface I - (void)bar; @end /// \endcode /// but not /// \code /// @interface I + (void)foo; @end /// \endcode AST_MATCHER(ObjCMethodDecl, isInstanceMethod) { return Node.isInstanceMethod(); } /// Returns true when the Objective-C message is sent to a class. /// /// Example /// matcher = objcMessageExpr(isClassMessage()) /// matches /// \code /// [NSString stringWithFormat:@"format"]; /// \endcode /// but not /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode AST_MATCHER(ObjCMessageExpr, isClassMessage) { return Node.isClassMessage(); } /// Returns true when the Objective-C message is sent to an instance. /// /// Example /// matcher = objcMessageExpr(isInstanceMessage()) /// matches /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode /// but not /// \code /// [NSString stringWithFormat:@"format"]; /// \endcode AST_MATCHER(ObjCMessageExpr, isInstanceMessage) { return Node.isInstanceMessage(); } /// Matches if the Objective-C message is sent to an instance, /// and the inner matcher matches on that instance. /// /// For example the method call in /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode /// is matched by /// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x")))))) AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>, InnerMatcher) { const Expr *ReceiverNode = Node.getInstanceReceiver(); return (ReceiverNode != nullptr && InnerMatcher.matches(*ReceiverNode->IgnoreParenImpCasts(), Finder, Builder)); } /// Matches when BaseName == Selector.getAsString() /// /// matcher = objCMessageExpr(hasSelector("loadHTMLString:baseURL:")); /// matches the outer message expr in the code below, but NOT the message /// invocation for self.bodyView. 
/// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) { Selector Sel = Node.getSelector(); return BaseName.compare(Sel.getAsString()) == 0; } /// Matches when at least one of the supplied string equals to the /// Selector.getAsString() /// /// matcher = objCMessageExpr(hasSelector("methodA:", "methodB:")); /// matches both of the expressions below: /// \code /// [myObj methodA:argA]; /// [myObj methodB:argB]; /// \endcode extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>, StringRef, internal::hasAnySelectorFunc> hasAnySelector; /// Matches ObjC selectors whose name contains /// a substring matched by the given RegExp. /// matcher = objCMessageExpr(matchesSelector("loadHTMLString\:baseURL?")); /// matches the outer message expr in the code below, but NOT the message /// invocation for self.bodyView. /// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, matchesSelector, std::string, RegExp) { assert(!RegExp.empty()); std::string SelectorString = Node.getSelector().getAsString(); llvm::Regex RE(RegExp); return RE.match(SelectorString); } /// Matches when the selector is the empty selector /// /// Matches only when the selector of the objCMessageExpr is NULL. This may /// represent an error condition in the tree! AST_MATCHER(ObjCMessageExpr, hasNullSelector) { return Node.getSelector().isNull(); } /// Matches when the selector is a Unary Selector /// /// matcher = objCMessageExpr(matchesSelector(hasUnarySelector()); /// matches self.bodyView in the code below, but NOT the outer message /// invocation of "loadHTMLString:baseURL:". 
/// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER(ObjCMessageExpr, hasUnarySelector) { return Node.getSelector().isUnarySelector(); } /// Matches when the selector is a keyword selector /// /// objCMessageExpr(hasKeywordSelector()) matches the generated setFrame /// message expression in /// /// \code /// UIWebView *webView = ...; /// CGRect bodyFrame = webView.frame; /// bodyFrame.size.height = self.bodyContentHeight; /// webView.frame = bodyFrame; /// // ^---- matches here /// \endcode AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) { return Node.getSelector().isKeywordSelector(); } /// Matches when the selector has the specified number of arguments /// /// matcher = objCMessageExpr(numSelectorArgs(0)); /// matches self.bodyView in the code below /// /// matcher = objCMessageExpr(numSelectorArgs(2)); /// matches the invocation of "loadHTMLString:baseURL:" but not that /// of self.bodyView /// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) { return Node.getSelector().getNumArgs() == N; } /// Matches if the call expression's callee expression matches. /// /// Given /// \code /// class Y { void x() { this->x(); x(); Y y; y.x(); } }; /// void f() { f(); } /// \endcode /// callExpr(callee(expr())) /// matches this->x(), x(), y.x(), f() /// with callee(...) /// matching this->x, x, y.x, f respectively /// /// Note: Callee cannot take the more general internal::Matcher<Expr> /// because this introduces ambiguous overloads with calls to Callee taking a /// internal::Matcher<Decl>, as the matcher hierarchy is purely /// implemented in terms of implicit casts. AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>, InnerMatcher) { const Expr *ExprNode = Node.getCallee(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches if the call expression's callee's declaration matches the /// given matcher. 
/// /// Example matches y.x() (matcher = callExpr(callee( /// cxxMethodDecl(hasName("x"))))) /// \code /// class Y { public: void x(); }; /// void z() { Y y; y.x(); } /// \endcode AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher, 1) { return callExpr(hasDeclaration(InnerMatcher)).matches(Node, Finder, Builder); } /// Matches if the expression's or declaration's type matches a type /// matcher. /// /// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X"))))) /// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X"))))) /// and U (matcher = typedefDecl(hasType(asString("int"))) /// and friend class X (matcher = friendDecl(hasType("X")) /// \code /// class X {}; /// void y(X &x) { x; X z; } /// typedef int U; /// class Y { friend class X; }; /// \endcode AST_POLYMORPHIC_MATCHER_P_OVERLOAD( hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl, ValueDecl), internal::Matcher<QualType>, InnerMatcher, 0) { QualType QT = internal::getUnderlyingType(Node); if (!QT.isNull()) return InnerMatcher.matches(QT, Finder, Builder); return false; } /// Overloaded to match the declaration of the expression's or value /// declaration's type. /// /// In case of a value declaration (for example a variable declaration), /// this resolves one layer of indirection. For example, in the value /// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of /// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the /// declaration of x. 
/// /// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X"))))) /// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X"))))) /// and friend class X (matcher = friendDecl(hasType("X")) /// \code /// class X {}; /// void y(X &x) { x; X z; } /// class Y { friend class X; }; /// \endcode /// /// Example matches class Derived /// (matcher = cxxRecordDecl(hasAnyBase(hasType(cxxRecordDecl(hasName("Base")))))) /// \code /// class Base {}; /// class Derived : Base {}; /// \endcode /// /// Usable as: Matcher<Expr>, Matcher<FriendDecl>, Matcher<ValueDecl>, /// Matcher<CXXBaseSpecifier> AST_POLYMORPHIC_MATCHER_P_OVERLOAD( hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl, CXXBaseSpecifier), internal::Matcher<Decl>, InnerMatcher, 1) { QualType QT = internal::getUnderlyingType(Node); if (!QT.isNull()) return qualType(hasDeclaration(InnerMatcher)).matches(QT, Finder, Builder); return false; } /// Matches if the type location of the declarator decl's type matches /// the inner matcher. /// /// Given /// \code /// int x; /// \endcode /// declaratorDecl(hasTypeLoc(loc(asString("int")))) /// matches int x AST_MATCHER_P(DeclaratorDecl, hasTypeLoc, internal::Matcher<TypeLoc>, Inner) { if (!Node.getTypeSourceInfo()) // This happens for example for implicit destructors. return false; return Inner.matches(Node.getTypeSourceInfo()->getTypeLoc(), Finder, Builder); } /// Matches if the matched type is represented by the given string. /// /// Given /// \code /// class Y { public: void x(); }; /// void z() { Y* y; y->x(); } /// \endcode /// cxxMemberCallExpr(on(hasType(asString("class Y *")))) /// matches y->x() AST_MATCHER_P(QualType, asString, std::string, Name) { return Name == Node.getAsString(); } /// Matches if the matched type is a pointer type and the pointee type /// matches the specified matcher. 
///
/// Example matches y->x()
///   (matcher = cxxMemberCallExpr(on(hasType(pointsTo
///      cxxRecordDecl(hasName("Y")))))))
/// \code
///   class Y { public: void x(); };
///   void z() { Y *y; y->x(); }
/// \endcode
AST_MATCHER_P(
    QualType, pointsTo, internal::Matcher<QualType>,
    InnerMatcher) {
  // isAnyPointerType() covers Objective-C object pointers as well as plain
  // C/C++ pointers.
  return (!Node.isNull() && Node->isAnyPointerType() &&
          InnerMatcher.matches(Node->getPointeeType(), Finder, Builder));
}

/// Overloaded to match the pointee type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  return pointsTo(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}

/// Matches if the matched type matches the unqualified desugared
/// type of the matched node.
///
/// For example, in:
/// \code
///   class A {};
///   using B = A;
/// \endcode
/// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches
/// both B and A.
AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>,
              InnerMatcher) {
  return InnerMatcher.matches(*Node.getUnqualifiedDesugaredType(), Finder,
                              Builder);
}

/// Matches if the matched type is a reference type and the referenced
/// type matches the specified matcher.
///
/// Example matches X &x and const X &y
///     (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X"))))))
/// \code
///   class X {
///     void a(X b) {
///       X &x = b;
///       const X &y = b;
///     }
///   };
/// \endcode
AST_MATCHER_P(QualType, references, internal::Matcher<QualType>,
              InnerMatcher) {
  // getPointeeType() is the referenced type for reference types.
  return (!Node.isNull() && Node->isReferenceType() &&
          InnerMatcher.matches(Node->getPointeeType(), Finder, Builder));
}

/// Matches QualTypes whose canonical type matches InnerMatcher.
///
/// Given:
/// \code
///   typedef int &int_ref;
///   int a;
///   int_ref b = a;
/// \endcode
///
/// \c varDecl(hasType(qualType(referenceType()))))) will not match the
/// declaration of b but \c
/// varDecl(hasType(qualType(hasCanonicalType(referenceType())))))) does.
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>,
              InnerMatcher) {
  if (Node.isNull())
    return false;
  return InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder);
}

/// Overloaded to match the referenced type's declaration.
AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>,
                       InnerMatcher, 1) {
  return references(qualType(hasDeclaration(InnerMatcher)))
      .matches(Node, Finder, Builder);
}

/// Matches on the implicit object argument of a member call expression. Unlike
/// `on`, matches the argument directly without stripping away anything.
///
/// Given
/// \code
///   class Y { public: void m(); };
///   Y g();
///   class X : public Y { void g(); };
///   void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); }
/// \endcode
/// cxxMemberCallExpr(onImplicitObjectArgument(hasType(
///     cxxRecordDecl(hasName("Y")))))
///   matches `y.m()`, `x.m()` and (g()).m(), but not `x.g()`.
/// cxxMemberCallExpr(on(callExpr()))
///   does not match `(g()).m()`, because the parens are not ignored.
///
/// FIXME: Overload to allow directly matching types?
AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument,
              internal::Matcher<Expr>, InnerMatcher) {
  const Expr *ExprNode = Node.getImplicitObjectArgument();
  return (ExprNode != nullptr &&
          InnerMatcher.matches(*ExprNode, Finder, Builder));
}

/// Matches if the type of the expression's implicit object argument either
/// matches the InnerMatcher, or is a pointer to a type that matches the
/// InnerMatcher.
///
/// Given
/// \code
///   class Y { public: void m(); };
///   class X : public Y { void g(); };
///   void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); }
/// \endcode
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
///     cxxRecordDecl(hasName("Y")))))
///   matches `y.m()`, `p->m()` and `x.m()`.
/// cxxMemberCallExpr(thisPointerType(hasDeclaration(
///     cxxRecordDecl(hasName("X")))))
///   matches `x.g()`.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<QualType>, InnerMatcher, 0) {
  // The implicit object argument either has the type directly or is a
  // pointer to it (`obj.m()` vs `ptr->m()`), hence the anyOf.
  return onImplicitObjectArgument(
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}

/// Overloaded to match the type's declaration.
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType,
                       internal::Matcher<Decl>, InnerMatcher, 1) {
  return onImplicitObjectArgument(
      anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher))))
      .matches(Node, Finder, Builder);
}

/// Matches a DeclRefExpr that refers to a declaration that matches the
/// specified matcher.
///
/// Example matches x in if(x)
///     (matcher = declRefExpr(to(varDecl(hasName("x")))))
/// \code
///   bool x;
///   if (x) {}
/// \endcode
AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>, InnerMatcher) {
  const Decl *DeclNode = Node.getDecl();
  return (DeclNode != nullptr &&
          InnerMatcher.matches(*DeclNode, Finder, Builder));
}

/// Matches a \c DeclRefExpr that refers to a declaration through a
/// specific using shadow declaration.
///
/// Given
/// \code
///   namespace a { void f() {} }
///   using a::f;
///   void g() {
///     f();     // Matches this ..
///     a::f();  // .. but not this.
///   }
/// \endcode
/// declRefExpr(throughUsingDecl(anything()))
///   matches \c f()
AST_MATCHER_P(DeclRefExpr, throughUsingDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  // getFoundDecl() is a UsingShadowDecl only when the reference goes
  // through a using-declaration; otherwise this matcher fails.
  const NamedDecl *FoundDecl = Node.getFoundDecl();
  if (const UsingShadowDecl *UsingDecl = dyn_cast<UsingShadowDecl>(FoundDecl))
    return InnerMatcher.matches(*UsingDecl, Finder, Builder);
  return false;
}

/// Matches an \c OverloadExpr if any of the declarations in the set of
/// overloads matches the given matcher.
///
/// Given
/// \code
///   template <typename T> void foo(T);
///   template <typename T> void bar(T);
///   template <typename T> void baz(T t) {
///     foo(t);
///     bar(t);
///   }
/// \endcode
/// unresolvedLookupExpr(hasAnyDeclaration(
///     functionTemplateDecl(hasName("foo"))))
///   matches \c foo in \c foo(t); but not \c bar in \c bar(t);
AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>,
              InnerMatcher) {
  return matchesFirstInPointerRange(InnerMatcher, Node.decls_begin(),
                                    Node.decls_end(), Finder, Builder);
}

/// Matches the Decl of a DeclStmt which has a single declaration.
///
/// Given
/// \code
///   int a, b;
///   int c;
/// \endcode
/// declStmt(hasSingleDecl(anything()))
///   matches 'int c;' but not 'int a, b;'.
AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) {
  if (Node.isSingleDecl()) {
    const Decl *FoundDecl = Node.getSingleDecl();
    return InnerMatcher.matches(*FoundDecl, Finder, Builder);
  }
  return false;
}

/// Matches a variable declaration that has an initializer expression
/// that matches the given matcher.
///
/// Example matches x (matcher = varDecl(hasInitializer(callExpr())))
/// \code
///   bool y() { return true; }
///   bool x = y();
/// \endcode
AST_MATCHER_P(
    VarDecl, hasInitializer, internal::Matcher<Expr>,
    InnerMatcher) {
  // getAnyInitializer(): presumably finds the initializer on any
  // redeclaration of the variable, not just this one — NOTE(review):
  // confirm against the VarDecl API.
  const Expr *Initializer = Node.getAnyInitializer();
  return (Initializer != nullptr &&
          InnerMatcher.matches(*Initializer, Finder, Builder));
}

/// Matches a static variable with local scope.
///
/// Example matches y (matcher = varDecl(isStaticLocal()))
/// \code
/// void f() {
///   int x;
///   static int y;
/// }
/// static int z;
/// \endcode
AST_MATCHER(VarDecl, isStaticLocal) {
  return Node.isStaticLocal();
}

/// Matches a variable declaration that has function scope and is a
/// non-static local variable.
/// /// Example matches x (matcher = varDecl(hasLocalStorage()) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode AST_MATCHER(VarDecl, hasLocalStorage) { return Node.hasLocalStorage(); } /// Matches a variable declaration that does not have local storage. /// /// Example matches y and z (matcher = varDecl(hasGlobalStorage()) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode AST_MATCHER(VarDecl, hasGlobalStorage) { return Node.hasGlobalStorage(); } /// Matches a variable declaration that has automatic storage duration. /// /// Example matches x, but not y, z, or a. /// (matcher = varDecl(hasAutomaticStorageDuration()) /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// \endcode AST_MATCHER(VarDecl, hasAutomaticStorageDuration) { return Node.getStorageDuration() == SD_Automatic; } /// Matches a variable declaration that has static storage duration. /// It includes the variable declared at namespace scope and those declared /// with "static" and "extern" storage class specifiers. /// /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// static int b; /// extern int c; /// varDecl(hasStaticStorageDuration()) /// matches the function declaration y, a, b and c. /// \endcode AST_MATCHER(VarDecl, hasStaticStorageDuration) { return Node.getStorageDuration() == SD_Static; } /// Matches a variable declaration that has thread storage duration. /// /// Example matches z, but not x, z, or a. /// (matcher = varDecl(hasThreadStorageDuration()) /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// \endcode AST_MATCHER(VarDecl, hasThreadStorageDuration) { return Node.getStorageDuration() == SD_Thread; } /// Matches a variable declaration that is an exception variable from /// a C++ catch block, or an Objective-C \@catch statement. 
/// /// Example matches x (matcher = varDecl(isExceptionVariable()) /// \code /// void f(int y) { /// try { /// } catch (int x) { /// } /// } /// \endcode AST_MATCHER(VarDecl, isExceptionVariable) { return Node.isExceptionVariable(); } /// Checks that a call expression or a constructor call expression has /// a specific number of arguments (including absent default arguments). /// /// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2))) /// \code /// void f(int x, int y); /// f(0, 0); /// \endcode AST_POLYMORPHIC_MATCHER_P(argumentCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr, CXXConstructExpr, ObjCMessageExpr), unsigned, N) { return Node.getNumArgs() == N; } /// Matches the n'th argument of a call expression or a constructor /// call expression. /// /// Example matches y in x(y) /// (matcher = callExpr(hasArgument(0, declRefExpr()))) /// \code /// void x(int) { int y; x(y); } /// \endcode AST_POLYMORPHIC_MATCHER_P2(hasArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr, CXXConstructExpr, ObjCMessageExpr), unsigned, N, internal::Matcher<Expr>, InnerMatcher) { return (N < Node.getNumArgs() && InnerMatcher.matches( *Node.getArg(N)->IgnoreParenImpCasts(), Finder, Builder)); } /// Matches the n'th item of an initializer list expression. /// /// Example matches y. /// (matcher = initListExpr(hasInit(0, expr()))) /// \code /// int x{y}. /// \endcode AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N, ast_matchers::internal::Matcher<Expr>, InnerMatcher) { return N < Node.getNumInits() && InnerMatcher.matches(*Node.getInit(N), Finder, Builder); } /// Matches declaration statements that contain a specific number of /// declarations. /// /// Example: Given /// \code /// int a, b; /// int c; /// int d = 2, e; /// \endcode /// declCountIs(2) /// matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'. 
AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) {
  // Count the declarations in this statement and compare against N.
  const auto Count = std::distance(Node.decl_begin(), Node.decl_end());
  return Count == static_cast<ptrdiff_t>(N);
}

/// Matches the n'th declaration of a declaration statement.
///
/// Note that this does not work for global declarations because the AST
/// breaks up multiple-declaration DeclStmt's into multiple single-declaration
/// DeclStmt's.
/// Example: Given non-global declarations
/// \code
///   int a, b = 0;
///   int c;
///   int d = 2, e;
/// \endcode
/// declStmt(containsDeclaration(
///       0, varDecl(hasInitializer(anything()))))
///   matches only 'int d = 2, e;', and
/// declStmt(containsDeclaration(1, varDecl()))
/// \code
///   matches 'int a, b = 0' as well as 'int d = 2, e;'
///   but 'int c;' is not matched.
/// \endcode
AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N,
               internal::Matcher<Decl>, InnerMatcher) {
  // Reject out-of-range indices before stepping to the N'th declaration.
  const unsigned NumDecls = std::distance(Node.decl_begin(), Node.decl_end());
  if (N >= NumDecls)
    return false;
  const auto It = std::next(Node.decl_begin(), N);
  return InnerMatcher.matches(**It, Finder, Builder);
}

/// Matches a C++ catch statement that has a catch-all handler.
///
/// Given
/// \code
///   try {
///     // ...
///   } catch (int) {
///     // ...
///   } catch (...) {
///     // ...
///   }
/// \endcode
/// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int).
AST_MATCHER(CXXCatchStmt, isCatchAll) {
  // A catch-all handler has no exception declaration at all.
  return !Node.getExceptionDecl();
}

/// Matches a constructor initializer.
/// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl( /// hasAnyConstructorInitializer(anything()) /// ))) /// record matches Foo, hasAnyConstructorInitializer matches foo_(1) AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer, internal::Matcher<CXXCtorInitializer>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.init_begin(), Node.init_end(), Finder, Builder); } /// Matches the field declaration of a constructor initializer. /// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer( /// forField(hasName("foo_")))))) /// matches Foo /// with forField matching foo_ AST_MATCHER_P(CXXCtorInitializer, forField, internal::Matcher<FieldDecl>, InnerMatcher) { const FieldDecl *NodeAsDecl = Node.getAnyMember(); return (NodeAsDecl != nullptr && InnerMatcher.matches(*NodeAsDecl, Finder, Builder)); } /// Matches the initializer expression of a constructor initializer. /// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer( /// withInitializer(integerLiteral(equals(1))))))) /// matches Foo /// with withInitializer matching (1) AST_MATCHER_P(CXXCtorInitializer, withInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr* NodeAsExpr = Node.getInit(); return (NodeAsExpr != nullptr && InnerMatcher.matches(*NodeAsExpr, Finder, Builder)); } /// Matches a constructor initializer if it is explicitly written in /// code (as opposed to implicitly added by the compiler). 
/// /// Given /// \code /// struct Foo { /// Foo() { } /// Foo(int) : foo_("A") { } /// string foo_; /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten())) /// will match Foo(int), but not Foo() AST_MATCHER(CXXCtorInitializer, isWritten) { return Node.isWritten(); } /// Matches a constructor initializer if it is initializing a base, as /// opposed to a member. /// /// Given /// \code /// struct B {}; /// struct D : B { /// int I; /// D(int i) : I(i) {} /// }; /// struct E : B { /// E() : B() {} /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer())) /// will match E(), but not match D(int). AST_MATCHER(CXXCtorInitializer, isBaseInitializer) { return Node.isBaseInitializer(); } /// Matches a constructor initializer if it is initializing a member, as /// opposed to a base. /// /// Given /// \code /// struct B {}; /// struct D : B { /// int I; /// D(int i) : I(i) {} /// }; /// struct E : B { /// E() : B() {} /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer())) /// will match D(int), but not match E(). AST_MATCHER(CXXCtorInitializer, isMemberInitializer) { return Node.isMemberInitializer(); } /// Matches any argument of a call expression or a constructor call /// expression, or an ObjC-message-send expression. /// /// Given /// \code /// void x(int, int, int) { int y; x(1, y, 42); } /// \endcode /// callExpr(hasAnyArgument(declRefExpr())) /// matches x(1, y, 42) /// with hasAnyArgument(...) 
/// matching y /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// void foo(I *i) { [i f:12]; } /// \endcode /// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12)))) /// matches [i f:12] AST_POLYMORPHIC_MATCHER_P(hasAnyArgument, AST_POLYMORPHIC_SUPPORTED_TYPES( CallExpr, CXXConstructExpr, CXXUnresolvedConstructExpr, ObjCMessageExpr), internal::Matcher<Expr>, InnerMatcher) { for (const Expr *Arg : Node.arguments()) { BoundNodesTreeBuilder Result(*Builder); if (InnerMatcher.matches(*Arg, Finder, &Result)) { *Builder = std::move(Result); return true; } } return false; } /// Matches any capture of a lambda expression. /// /// Given /// \code /// void foo() { /// int x; /// auto f = [x](){}; /// } /// \endcode /// lambdaExpr(hasAnyCapture(anything())) /// matches [x](){}; AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<VarDecl>, InnerMatcher, 0) { for (const LambdaCapture &Capture : Node.captures()) { if (Capture.capturesVariable()) { BoundNodesTreeBuilder Result(*Builder); if (InnerMatcher.matches(*Capture.getCapturedVar(), Finder, &Result)) { *Builder = std::move(Result); return true; } } } return false; } /// Matches any capture of 'this' in a lambda expression. /// /// Given /// \code /// struct foo { /// void bar() { /// auto f = [this](){}; /// } /// } /// \endcode /// lambdaExpr(hasAnyCapture(cxxThisExpr())) /// matches [this](){}; AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<CXXThisExpr>, InnerMatcher, 1) { return llvm::any_of(Node.captures(), [](const LambdaCapture &LC) { return LC.capturesThis(); }); } /// Matches a constructor call expression which uses list initialization. AST_MATCHER(CXXConstructExpr, isListInitialization) { return Node.isListInitialization(); } /// Matches a constructor call expression which requires /// zero initialization. 
///
/// Given
/// \code
/// void foo() {
///   struct point { double x; double y; };
///   point pt[2] = { { 1.0, 2.0 } };
/// }
/// \endcode
/// initListExpr(has(cxxConstructExpr(requiresZeroInitialization()))
/// will match the implicit array filler for pt[1].
AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) {
  return Node.requiresZeroInitialization();
}

/// Matches the n'th parameter of a function or an ObjC method
/// declaration or a block.
///
/// Given
/// \code
///   class X { void f(int x) {} };
/// \endcode
/// cxxMethodDecl(hasParameter(0, hasType(varDecl())))
///   matches f(int x) {}
/// with hasParameter(...)
///   matching int x
///
/// For ObjectiveC, given
/// \code
///   @interface I - (void) f:(int) y; @end
/// \endcode
//
/// the matcher objcMethodDecl(hasParameter(0, hasName("y")))
/// matches the declaration of method f with hasParameter
/// matching y.
AST_POLYMORPHIC_MATCHER_P2(hasParameter,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                           ObjCMethodDecl,
                                                           BlockDecl),
                           unsigned, N, internal::Matcher<ParmVarDecl>,
                           InnerMatcher) {
  // Out-of-range N fails rather than asserting.
  return (N < Node.parameters().size() &&
          InnerMatcher.matches(*Node.parameters()[N], Finder, Builder));
}

/// Matches all arguments and their respective ParmVarDecl.
///
/// Given
/// \code
///   void f(int i);
///   int y;
///   f(y);
/// \endcode
/// callExpr(
///   forEachArgumentWithParam(
///     declRefExpr(to(varDecl(hasName("y")))),
///     parmVarDecl(hasType(isInteger()))
/// ))
///   matches f(y);
/// with declRefExpr(...)
///   matching int y
/// and parmVarDecl(...)
///   matching int i
AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam,
                           AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr,
                                                           CXXConstructExpr),
                           internal::Matcher<Expr>, ArgMatcher,
                           internal::Matcher<ParmVarDecl>, ParamMatcher) {
  BoundNodesTreeBuilder Result;
  // The first argument of an overloaded member operator is the implicit object
  // argument of the method which should not be matched against a parameter, so
  // we skip over it here.
  BoundNodesTreeBuilder Matches;
  unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl()))
                          .matches(Node, Finder, &Matches)
                      ? 1
                      : 0;
  int ParamIndex = 0;
  bool Matched = false;
  for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) {
    BoundNodesTreeBuilder ArgMatches(*Builder);
    if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()),
                           Finder, &ArgMatches)) {
      BoundNodesTreeBuilder ParamMatches(ArgMatches);
      // Resolve the parameter through either the constructor or the callee
      // function declaration, whichever applies to this node.
      if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl(
                         hasParameter(ParamIndex, ParamMatcher)))),
                     callExpr(callee(functionDecl(
                         hasParameter(ParamIndex, ParamMatcher))))))
              .matches(Node, Finder, &ParamMatches)) {
        Result.addMatch(ParamMatches);
        Matched = true;
      }
    }
    // ParamIndex advances for every argument, matched or not, so it stays
    // aligned with ArgIndex (minus any skipped implicit object argument).
    ++ParamIndex;
  }
  // Replace the caller's builder with the accumulated matches (one match per
  // successful argument/parameter pair).
  *Builder = std::move(Result);
  return Matched;
}

/// Matches the ParmVarDecl nodes that are at the N'th position in the parameter
/// list. The parameter list could be that of either a block, function, or
/// objc-method.
///
///
/// Given
///
/// \code
/// void f(int a, int b, int c) {
/// }
/// \endcode
///
/// ``parmVarDecl(isAtPosition(0))`` matches ``int a``.
///
/// ``parmVarDecl(isAtPosition(1))`` matches ``int b``.
AST_MATCHER_P(ParmVarDecl, isAtPosition, unsigned, N) {
  // The enclosing context may be a function, a block, or an ObjC method;
  // each is tried in turn. Identity comparison (== &Node) ensures the
  // parameter at position N is exactly this declaration.
  const clang::DeclContext *Context = Node.getParentFunctionOrMethod();

  if (const auto *Decl = dyn_cast_or_null<FunctionDecl>(Context))
    return N < Decl->param_size() && Decl->getParamDecl(N) == &Node;
  if (const auto *Decl = dyn_cast_or_null<BlockDecl>(Context))
    return N < Decl->param_size() && Decl->getParamDecl(N) == &Node;
  if (const auto *Decl = dyn_cast_or_null<ObjCMethodDecl>(Context))
    return N < Decl->param_size() && Decl->getParamDecl(N) == &Node;

  return false;
}

/// Matches any parameter of a function or an ObjC method declaration or a
/// block.
///
/// Does not match the 'this' parameter of a method.
/// /// Given /// \code /// class X { void f(int x, int y, int z) {} }; /// \endcode /// cxxMethodDecl(hasAnyParameter(hasName("y"))) /// matches f(int x, int y, int z) {} /// with hasAnyParameter(...) /// matching int y /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// \endcode // /// the matcher objcMethodDecl(hasAnyParameter(hasName("y"))) /// matches the declaration of method f with hasParameter /// matching y. /// /// For blocks, given /// \code /// b = ^(int y) { printf("%d", y) }; /// \endcode /// /// the matcher blockDecl(hasAnyParameter(hasName("y"))) /// matches the declaration of the block b with hasParameter /// matching y. AST_POLYMORPHIC_MATCHER_P(hasAnyParameter, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, ObjCMethodDecl, BlockDecl), internal::Matcher<ParmVarDecl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.param_begin(), Node.param_end(), Finder, Builder); } /// Matches \c FunctionDecls and \c FunctionProtoTypes that have a /// specific parameter count. /// /// Given /// \code /// void f(int i) {} /// void g(int i, int j) {} /// void h(int i, int j); /// void j(int i); /// void k(int x, int y, int z, ...); /// \endcode /// functionDecl(parameterCountIs(2)) /// matches \c g and \c h /// functionProtoType(parameterCountIs(2)) /// matches \c g and \c h /// functionProtoType(parameterCountIs(3)) /// matches \c k AST_POLYMORPHIC_MATCHER_P(parameterCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType), unsigned, N) { return Node.getNumParams() == N; } /// Matches \c FunctionDecls that have a noreturn attribute. 
/// /// Given /// \code /// void nope(); /// [[noreturn]] void a(); /// __attribute__((noreturn)) void b(); /// struct c { [[noreturn]] c(); }; /// \endcode /// functionDecl(isNoReturn()) /// matches all of those except /// \code /// void nope(); /// \endcode AST_MATCHER(FunctionDecl, isNoReturn) { return Node.isNoReturn(); } /// Matches the return type of a function declaration. /// /// Given: /// \code /// class X { int f() { return 1; } }; /// \endcode /// cxxMethodDecl(returns(asString("int"))) /// matches int f() { return 1; } AST_MATCHER_P(FunctionDecl, returns, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getReturnType(), Finder, Builder); } /// Matches extern "C" function or variable declarations. /// /// Given: /// \code /// extern "C" void f() {} /// extern "C" { void g() {} } /// void h() {} /// extern "C" int x = 1; /// extern "C" int y = 2; /// int z = 3; /// \endcode /// functionDecl(isExternC()) /// matches the declaration of f and g, but not the declaration of h. /// varDecl(isExternC()) /// matches the declaration of x and y, but not the declaration of z. AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl)) { return Node.isExternC(); } /// Matches variable/function declarations that have "static" storage /// class specifier ("static" keyword) written in the source. /// /// Given: /// \code /// static void f() {} /// static int i = 0; /// extern int j; /// int k; /// \endcode /// functionDecl(isStaticStorageClass()) /// matches the function declaration f. /// varDecl(isStaticStorageClass()) /// matches the variable declaration i. AST_POLYMORPHIC_MATCHER(isStaticStorageClass, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl)) { return Node.getStorageClass() == SC_Static; } /// Matches deleted function declarations. 
///
/// Given:
/// \code
///   void Func();
///   void DeletedFunc() = delete;
/// \endcode
/// functionDecl(isDeleted())
///   matches the declaration of DeletedFunc, but not Func.
AST_MATCHER(FunctionDecl, isDeleted) { return Node.isDeleted(); }

/// Matches defaulted function declarations.
///
/// Given:
/// \code
///   class A { ~A(); };
///   class B { ~B() = default; };
/// \endcode
/// functionDecl(isDefaulted())
///   matches the declaration of ~B, but not ~A.
AST_MATCHER(FunctionDecl, isDefaulted) { return Node.isDefaulted(); }

/// Matches functions that have a dynamic exception specification.
///
/// Given:
/// \code
///   void f();
///   void g() noexcept;
///   void h() noexcept(true);
///   void i() noexcept(false);
///   void j() throw();
///   void k() throw(int);
///   void l() throw(...);
/// \endcode
/// functionDecl(hasDynamicExceptionSpec()) and
///   functionProtoType(hasDynamicExceptionSpec())
///   match the declarations of j, k, and l, but not f, g, h, or i.
AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  // Only functions with a prototype can carry an exception specification.
  if (const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node))
    return FnTy->hasDynamicExceptionSpec();
  return false;
}

/// Matches functions that have a non-throwing exception specification.
///
/// Given:
/// \code
///   void f();
///   void g() noexcept;
///   void h() throw();
///   void i() throw(int);
///   void j() noexcept(false);
/// \endcode
/// functionDecl(isNoThrow()) and functionProtoType(isNoThrow())
///   match the declarations of g, and h, but not f, i or j.
AST_POLYMORPHIC_MATCHER(isNoThrow,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
                                                        FunctionProtoType)) {
  const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node);

  // If the function does not have a prototype, then it is assumed to be a
  // throwing function (as it would if the function did not have any exception
  // specification).
  if (!FnTy)
    return false;

  // Assume the best for any unresolved exception specification.
  if (isUnresolvedExceptionSpec(FnTy->getExceptionSpecType()))
    return true;

  return FnTy->isNothrow();
}

/// Matches constexpr variable and function declarations,
///        and if constexpr.
///
/// Given:
/// \code
///   constexpr int foo = 42;
///   constexpr int bar();
///   void baz() { if constexpr(1 > 0) {} }
/// \endcode
/// varDecl(isConstexpr())
///   matches the declaration of foo.
/// functionDecl(isConstexpr())
///   matches the declaration of bar.
/// ifStmt(isConstexpr())
///   matches the if statement in baz.
AST_POLYMORPHIC_MATCHER(isConstexpr,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl,
                                                        FunctionDecl,
                                                        IfStmt)) {
  return Node.isConstexpr();
}

/// Matches selection statements with initializer.
///
/// Given:
/// \code
///  void foo() {
///    if (int i = foobar(); i > 0) {}
///    switch (int i = foobar(); i) {}
///    for (auto& a = get_range(); auto& x : a) {}
///  }
///  void bar() {
///    if (foobar() > 0) {}
///    switch (foobar()) {}
///    for (auto& x : get_range()) {}
///  }
/// \endcode
/// ifStmt(hasInitStatement(anything()))
///   matches the if statement in foo but not in bar.
/// switchStmt(hasInitStatement(anything()))
///   matches the switch statement in foo but not in bar.
/// cxxForRangeStmt(hasInitStatement(anything()))
///   matches the range for statement in foo but not in bar.
AST_POLYMORPHIC_MATCHER_P(hasInitStatement,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, SwitchStmt,
                                                          CXXForRangeStmt),
                          internal::Matcher<Stmt>, InnerMatcher) {
  const Stmt *Init = Node.getInit();
  return Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder);
}

/// Matches the condition expression of an if statement, for loop,
/// switch statement or conditional operator.
/// /// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true)))) /// \code /// if (true) {} /// \endcode AST_POLYMORPHIC_MATCHER_P( hasCondition, AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt, SwitchStmt, AbstractConditionalOperator), internal::Matcher<Expr>, InnerMatcher) { const Expr *const Condition = Node.getCond(); return (Condition != nullptr && InnerMatcher.matches(*Condition, Finder, Builder)); } /// Matches the then-statement of an if statement. /// /// Examples matches the if statement /// (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true))))) /// \code /// if (false) true; else false; /// \endcode AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Then = Node.getThen(); return (Then != nullptr && InnerMatcher.matches(*Then, Finder, Builder)); } /// Matches the else-statement of an if statement. /// /// Examples matches the if statement /// (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true))))) /// \code /// if (false) false; else true; /// \endcode AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Else = Node.getElse(); return (Else != nullptr && InnerMatcher.matches(*Else, Finder, Builder)); } /// Matches if a node equals a previously bound node. /// /// Matches a node if it equals the node previously bound to \p ID. /// /// Given /// \code /// class X { int a; int b; }; /// \endcode /// cxxRecordDecl( /// has(fieldDecl(hasName("a"), hasType(type().bind("t")))), /// has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t")))))) /// matches the class \c X, as \c a and \c b have the same type. /// /// Note that when multiple matches are involved via \c forEach* matchers, /// \c equalsBoundNodes acts as a filter. 
/// For example: /// compoundStmt( /// forEachDescendant(varDecl().bind("d")), /// forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d")))))) /// will trigger a match for each combination of variable declaration /// and reference to that variable declaration within a compound statement. AST_POLYMORPHIC_MATCHER_P(equalsBoundNode, AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type, QualType), std::string, ID) { // FIXME: Figure out whether it makes sense to allow this // on any other node types. // For *Loc it probably does not make sense, as those seem // unique. For NestedNameSepcifier it might make sense, as // those also have pointer identity, but I'm not sure whether // they're ever reused. internal::NotEqualsBoundNodePredicate Predicate; Predicate.ID = ID; Predicate.Node = DynTypedNode::create(Node); return Builder->removeBindings(Predicate); } /// Matches the condition variable statement in an if statement. /// /// Given /// \code /// if (A* a = GetAPointer()) {} /// \endcode /// hasConditionVariableStatement(...) /// matches 'A* a = GetAPointer()'. AST_MATCHER_P(IfStmt, hasConditionVariableStatement, internal::Matcher<DeclStmt>, InnerMatcher) { const DeclStmt* const DeclarationStatement = Node.getConditionVariableDeclStmt(); return DeclarationStatement != nullptr && InnerMatcher.matches(*DeclarationStatement, Finder, Builder); } /// Matches the index expression of an array subscript expression. /// /// Given /// \code /// int i[5]; /// void f() { i[1] = 42; } /// \endcode /// arraySubscriptExpression(hasIndex(integerLiteral())) /// matches \c i[1] with the \c integerLiteral() matching \c 1 AST_MATCHER_P(ArraySubscriptExpr, hasIndex, internal::Matcher<Expr>, InnerMatcher) { if (const Expr* Expression = Node.getIdx()) return InnerMatcher.matches(*Expression, Finder, Builder); return false; } /// Matches the base expression of an array subscript expression. 
/// /// Given /// \code /// int i[5]; /// void f() { i[1] = 42; } /// \endcode /// arraySubscriptExpression(hasBase(implicitCastExpr( /// hasSourceExpression(declRefExpr())))) /// matches \c i[1] with the \c declRefExpr() matching \c i AST_MATCHER_P(ArraySubscriptExpr, hasBase, internal::Matcher<Expr>, InnerMatcher) { if (const Expr* Expression = Node.getBase()) return InnerMatcher.matches(*Expression, Finder, Builder); return false; } /// Matches a 'for', 'while', 'do while' statement or a function /// definition that has a given body. /// /// Given /// \code /// for (;;) {} /// \endcode /// hasBody(compoundStmt()) /// matches 'for (;;) {}' /// with compoundStmt() /// matching '{}' AST_POLYMORPHIC_MATCHER_P(hasBody, AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt, WhileStmt, CXXForRangeStmt, FunctionDecl), internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Statement = internal::GetBodyMatcher<NodeType>::get(Node); return (Statement != nullptr && InnerMatcher.matches(*Statement, Finder, Builder)); } /// Matches compound statements where at least one substatement matches /// a given matcher. Also matches StmtExprs that have CompoundStmt as children. /// /// Given /// \code /// { {}; 1+2; } /// \endcode /// hasAnySubstatement(compoundStmt()) /// matches '{ {}; 1+2; }' /// with compoundStmt() /// matching '{}' AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement, AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt, StmtExpr), internal::Matcher<Stmt>, InnerMatcher) { const CompoundStmt *CS = CompoundStmtMatcher<NodeType>::get(Node); return CS && matchesFirstInPointerRange(InnerMatcher, CS->body_begin(), CS->body_end(), Finder, Builder); } /// Checks that a compound statement contains a specific number of /// child statements. /// /// Example: Given /// \code /// { for (;;) {} } /// \endcode /// compoundStmt(statementCountIs(0))) /// matches '{}' /// but does not match the outer compound statement. 
AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) {
  // CompoundStmt::size() is the number of direct child statements.
  return Node.size() == N;
}

/// Matches literals that are equal to the given value of type ValueT.
///
/// Given
/// \code
///   f('\0', false, 3.14, 42);
/// \endcode
/// characterLiteral(equals(0))
///   matches '\0'
/// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0))
///   match false
/// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2))
///   match 3.14
/// integerLiteral(equals(42))
///   matches 42
///
/// Note that you cannot directly match a negative numeric literal because the
/// minus sign is not part of the literal: It is a unary operator whose operand
/// is the positive numeric literal. Instead, you must use a unaryOperator()
/// matcher to match the minus sign:
///
/// unaryOperator(hasOperatorName("-"),
///               hasUnaryOperand(integerLiteral(equals(13))))
///
/// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>,
///            Matcher<FloatingLiteral>, Matcher<IntegerLiteral>
template <typename ValueT>
internal::PolymorphicMatcherWithParam1<internal::ValueEqualsMatcher, ValueT>
equals(const ValueT &Value) {
  return internal::PolymorphicMatcherWithParam1<
    internal::ValueEqualsMatcher, ValueT>(Value);
}

// Concrete overloads for bool, unsigned and double values; the trailing
// integer is the overload id required by the *_OVERLOAD macro.
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(
                                       CharacterLiteral, CXXBoolLiteralExpr,
                                       IntegerLiteral),
                                   bool, Value, 0) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
    .matchesNode(Node);
}

AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(
                                       CharacterLiteral, CXXBoolLiteralExpr,
                                       IntegerLiteral),
                                   unsigned, Value, 1) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
    .matchesNode(Node);
}

AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
                                   AST_POLYMORPHIC_SUPPORTED_TYPES(
                                       CharacterLiteral, CXXBoolLiteralExpr,
                                       FloatingLiteral, IntegerLiteral),
                                   double, Value, 2) {
  return internal::ValueEqualsMatcher<NodeType, ParamT>(Value)
    .matchesNode(Node);
}

/// Matches the operator Name of operator expressions (binary or
/// unary).
///
/// Example matches a || b (matcher = binaryOperator(hasOperatorName("||")))
/// \code
///   !(a || b)
/// \endcode
AST_POLYMORPHIC_MATCHER_P(hasOperatorName,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                          UnaryOperator),
                          std::string, Name) {
  // Compare against the spelling of this node's opcode (e.g. "||", "!").
  return Name == Node.getOpcodeStr(Node.getOpcode());
}

/// Matches operator expressions (binary or unary) that have any of the
/// specified names.
///
///    hasAnyOperatorName("+", "-")
///  Is equivalent to
///    anyOf(hasOperatorName("+"), hasOperatorName("-"))
extern const internal::VariadicFunction<
    internal::PolymorphicMatcherWithParam1<
        internal::HasAnyOperatorNameMatcher, std::vector<std::string>,
        AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, UnaryOperator)>,
    StringRef, internal::hasAnyOperatorNameFunc>
    hasAnyOperatorName;

/// Matches all kinds of assignment operators.
///
/// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator()))
/// \code
///   if (a == b)
///     a += b;
/// \endcode
///
/// Example 2: matches s1 = s2
///            (matcher = cxxOperatorCallExpr(isAssignmentOperator()))
/// \code
///   struct S { S& operator=(const S&); };
///   void x() { S s1, s2; s1 = s2; }
/// \endcode
AST_POLYMORPHIC_MATCHER(isAssignmentOperator,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                        CXXOperatorCallExpr)) {
  return Node.isAssignmentOp();
}

/// Matches comparison operators.
///
/// Example 1: matches a == b (matcher = binaryOperator(isComparisonOperator()))
/// \code
///   if (a == b)
///     a += b;
/// \endcode
///
/// Example 2: matches s1 < s2
///            (matcher = cxxOperatorCallExpr(isComparisonOperator()))
/// \code
///   struct S { bool operator<(const S& other); };
///   void x(S s1, S s2) { bool b1 = s1 < s2; }
/// \endcode
AST_POLYMORPHIC_MATCHER(isComparisonOperator,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator,
                                                        CXXOperatorCallExpr)) {
  return Node.isComparisonOp();
}

/// Matches the left hand side of binary operator expressions.
/// /// Example matches a (matcher = binaryOperator(hasLHS())) /// \code /// a || b /// \endcode AST_POLYMORPHIC_MATCHER_P(hasLHS, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, ArraySubscriptExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *LeftHandSide = Node.getLHS(); return (LeftHandSide != nullptr && InnerMatcher.matches(*LeftHandSide, Finder, Builder)); } /// Matches the right hand side of binary operator expressions. /// /// Example matches b (matcher = binaryOperator(hasRHS())) /// \code /// a || b /// \endcode AST_POLYMORPHIC_MATCHER_P(hasRHS, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, ArraySubscriptExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *RightHandSide = Node.getRHS(); return (RightHandSide != nullptr && InnerMatcher.matches(*RightHandSide, Finder, Builder)); } /// Matches if either the left hand side or the right hand side of a /// binary operator matches. inline internal::Matcher<BinaryOperator> hasEitherOperand( const internal::Matcher<Expr> &InnerMatcher) { return anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher)); } /// Matches if both matchers match with opposite sides of the binary operator. /// /// Example matcher = binaryOperator(hasOperands(integerLiteral(equals(1), /// integerLiteral(equals(2))) /// \code /// 1 + 2 // Match /// 2 + 1 // Match /// 1 + 1 // No match /// 2 + 2 // No match /// \endcode inline internal::Matcher<BinaryOperator> hasOperands(const internal::Matcher<Expr> &Matcher1, const internal::Matcher<Expr> &Matcher2) { return anyOf(allOf(hasLHS(Matcher1), hasRHS(Matcher2)), allOf(hasLHS(Matcher2), hasRHS(Matcher1))); } /// Matches if the operand of a unary operator matches. 
/// /// Example matches true (matcher = hasUnaryOperand( /// cxxBoolLiteral(equals(true)))) /// \code /// !true /// \endcode AST_MATCHER_P(UnaryOperator, hasUnaryOperand, internal::Matcher<Expr>, InnerMatcher) { const Expr * const Operand = Node.getSubExpr(); return (Operand != nullptr && InnerMatcher.matches(*Operand, Finder, Builder)); } /// Matches if the cast's source expression /// or opaque value's source expression matches the given matcher. /// /// Example 1: matches "a string" /// (matcher = castExpr(hasSourceExpression(cxxConstructExpr()))) /// \code /// class URL { URL(string); }; /// URL url = "a string"; /// \endcode /// /// Example 2: matches 'b' (matcher = /// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr()))) /// \code /// int a = b ?: 1; /// \endcode AST_POLYMORPHIC_MATCHER_P(hasSourceExpression, AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr, OpaqueValueExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *const SubExpression = internal::GetSourceExpressionMatcher<NodeType>::get(Node); return (SubExpression != nullptr && InnerMatcher.matches(*SubExpression, Finder, Builder)); } /// Matches casts that has a given cast kind. /// /// Example: matches the implicit cast around \c 0 /// (matcher = castExpr(hasCastKind(CK_NullToPointer))) /// \code /// int *p = 0; /// \endcode /// /// If the matcher is use from clang-query, CastKind parameter /// should be passed as a quoted string. e.g., hasCastKind("CK_NullToPointer"). AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) { return Node.getCastKind() == Kind; } /// Matches casts whose destination type matches a given matcher. /// /// (Note: Clang's AST refers to other conversions as "casts" too, and calls /// actual casts "explicit" casts.) 
AST_MATCHER_P(ExplicitCastExpr, hasDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Match the type as written in the cast, not the semantic result type.
  const QualType NodeType = Node.getTypeAsWritten();
  return InnerMatcher.matches(NodeType, Finder, Builder);
}

/// Matches implicit casts whose destination type matches a given
/// matcher.
///
/// FIXME: Unit test this matcher
AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType,
              internal::Matcher<QualType>, InnerMatcher) {
  return InnerMatcher.matches(Node.getType(), Finder, Builder);
}

/// Matches TagDecl object that are spelled with "struct."
///
/// Example matches S, but not C, U or E.
/// \code
///   struct S {};
///   class C {};
///   union U {};
///   enum E {};
/// \endcode
AST_MATCHER(TagDecl, isStruct) {
  return Node.isStruct();
}

/// Matches TagDecl object that are spelled with "union."
///
/// Example matches U, but not C, S or E.
/// \code
///   struct S {};
///   class C {};
///   union U {};
///   enum E {};
/// \endcode
AST_MATCHER(TagDecl, isUnion) {
  return Node.isUnion();
}

/// Matches TagDecl object that are spelled with "class."
///
/// Example matches C, but not S, U or E.
/// \code
///   struct S {};
///   class C {};
///   union U {};
///   enum E {};
/// \endcode
AST_MATCHER(TagDecl, isClass) {
  return Node.isClass();
}

/// Matches TagDecl object that are spelled with "enum."
///
/// Example matches E, but not C, S or U.
/// \code
///   struct S {};
///   class C {};
///   union U {};
///   enum E {};
/// \endcode
AST_MATCHER(TagDecl, isEnum) {
  return Node.isEnum();
}

/// Matches the true branch expression of a conditional operator.
///
/// Example 1 (conditional ternary operator): matches a
/// \code
///   condition ?
a : b /// \endcode /// /// Example 2 (conditional binary operator): matches opaqueValueExpr(condition) /// \code /// condition ?: b /// \endcode AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression, internal::Matcher<Expr>, InnerMatcher) { const Expr *Expression = Node.getTrueExpr(); return (Expression != nullptr && InnerMatcher.matches(*Expression, Finder, Builder)); } /// Matches the false branch expression of a conditional operator /// (binary or ternary). /// /// Example matches b /// \code /// condition ? a : b /// condition ?: b /// \endcode AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression, internal::Matcher<Expr>, InnerMatcher) { const Expr *Expression = Node.getFalseExpr(); return (Expression != nullptr && InnerMatcher.matches(*Expression, Finder, Builder)); } /// Matches if a declaration has a body attached. /// /// Example matches A, va, fa /// \code /// class A {}; /// class B; // Doesn't match, as it has no body. /// int va; /// extern int vb; // Doesn't match, as it doesn't define the variable. /// void fa() {} /// void fb(); // Doesn't match, as it has no body. /// @interface X /// - (void)ma; // Doesn't match, interface is declaration. /// @end /// @implementation X /// - (void)ma {} /// @end /// \endcode /// /// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>, /// Matcher<ObjCMethodDecl> AST_POLYMORPHIC_MATCHER(isDefinition, AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl, ObjCMethodDecl, FunctionDecl)) { return Node.isThisDeclarationADefinition(); } /// Matches if a function declaration is variadic. /// /// Example matches f, but not g or h. The function i will not match, even when /// compiled in C mode. /// \code /// void f(...); /// void g(int); /// template <typename... Ts> void h(Ts...); /// void i(); /// \endcode AST_MATCHER(FunctionDecl, isVariadic) { return Node.isVariadic(); } /// Matches the class declaration that the given method declaration /// belongs to. 
/// /// FIXME: Generalize this for other kinds of declarations. /// FIXME: What other kind of declarations would we need to generalize /// this to? /// /// Example matches A() in the last line /// (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl( /// ofClass(hasName("A")))))) /// \code /// class A { /// public: /// A(); /// }; /// A a = A(); /// \endcode AST_MATCHER_P(CXXMethodDecl, ofClass, internal::Matcher<CXXRecordDecl>, InnerMatcher) { const CXXRecordDecl *Parent = Node.getParent(); return (Parent != nullptr && InnerMatcher.matches(*Parent, Finder, Builder)); } /// Matches each method overridden by the given method. This matcher may /// produce multiple matches. /// /// Given /// \code /// class A { virtual void f(); }; /// class B : public A { void f(); }; /// class C : public B { void f(); }; /// \endcode /// cxxMethodDecl(ofClass(hasName("C")), /// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d") /// matches once, with "b" binding "A::f" and "d" binding "C::f" (Note /// that B::f is not overridden by C::f). /// /// The check can produce multiple matches in case of multiple inheritance, e.g. /// \code /// class A1 { virtual void f(); }; /// class A2 { virtual void f(); }; /// class C : public A1, public A2 { void f(); }; /// \endcode /// cxxMethodDecl(ofClass(hasName("C")), /// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d") /// matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and /// once with "b" binding "A2::f" and "d" binding "C::f". 
AST_MATCHER_P(CXXMethodDecl, forEachOverridden,
              internal::Matcher<CXXMethodDecl>, InnerMatcher) {
  BoundNodesTreeBuilder Result;
  bool Matched = false;
  for (const auto *Overridden : Node.overridden_methods()) {
    // Match each candidate against a copy of the current bindings so a
    // failed candidate does not pollute the next one.
    BoundNodesTreeBuilder OverriddenBuilder(*Builder);
    const bool OverriddenMatched =
        InnerMatcher.matches(*Overridden, Finder, &OverriddenBuilder);
    if (OverriddenMatched) {
      Matched = true;
      Result.addMatch(OverriddenBuilder);
    }
  }
  // Keep only the binding sets produced by successful candidates.
  *Builder = std::move(Result);
  return Matched;
}

/// Matches declarations of virtual methods and C++ base specifers that specify
/// virtual inheritance.
///
/// Example:
/// \code
///   class A {
///    public:
///     virtual void x(); // matches x
///   };
/// \endcode
///
/// Example:
/// \code
///   class Base {};
///   class DirectlyDerived : virtual Base {}; // matches Base
///   class IndirectlyDerived : DirectlyDerived, Base {}; // matches Base
/// \endcode
///
/// Usable as: Matcher<CXXMethodDecl>, Matcher<CXXBaseSpecifier>
AST_POLYMORPHIC_MATCHER(isVirtual,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(CXXMethodDecl,
                                                        CXXBaseSpecifier)) {
  return Node.isVirtual();
}

/// Matches if the given method declaration has an explicit "virtual".
///
/// Given
/// \code
///   class A {
///    public:
///     virtual void x();
///   };
///   class B : public A {
///    public:
///     void x();
///   };
/// \endcode
///   matches A::x but not B::x
AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) {
  return Node.isVirtualAsWritten();
}

/// Matches if the given method or class declaration is final.
///
/// Given:
/// \code
///   class A final {};
///
///   struct B {
///     virtual void f();
///   };
///
///   struct C : B {
///     void f() final;
///   };
/// \endcode
/// matches A and C::f, but not B, C, or B::f
AST_POLYMORPHIC_MATCHER(isFinal,
                        AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl,
                                                        CXXMethodDecl)) {
  // "template" disambiguator: NodeType is a dependent type inside the macro.
  return Node.template hasAttr<FinalAttr>();
}

/// Matches if the given method declaration is pure.
/// /// Given /// \code /// class A { /// public: /// virtual void x() = 0; /// }; /// \endcode /// matches A::x AST_MATCHER(CXXMethodDecl, isPure) { return Node.isPure(); } /// Matches if the given method declaration is const. /// /// Given /// \code /// struct A { /// void foo() const; /// void bar(); /// }; /// \endcode /// /// cxxMethodDecl(isConst()) matches A::foo() but not A::bar() AST_MATCHER(CXXMethodDecl, isConst) { return Node.isConst(); } /// Matches if the given method declaration declares a copy assignment /// operator. /// /// Given /// \code /// struct A { /// A &operator=(const A &); /// A &operator=(A &&); /// }; /// \endcode /// /// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not /// the second one. AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) { return Node.isCopyAssignmentOperator(); } /// Matches if the given method declaration declares a move assignment /// operator. /// /// Given /// \code /// struct A { /// A &operator=(const A &); /// A &operator=(A &&); /// }; /// \endcode /// /// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not /// the first one. AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) { return Node.isMoveAssignmentOperator(); } /// Matches if the given method declaration overrides another method. /// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// class B : public A { /// public: /// virtual void x(); /// }; /// \endcode /// matches B::x AST_MATCHER(CXXMethodDecl, isOverride) { return Node.size_overridden_methods() > 0 || Node.hasAttr<OverrideAttr>(); } /// Matches method declarations that are user-provided. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &) = default; // #2 /// S(S &&) = delete; // #3 /// }; /// \endcode /// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3. 
AST_MATCHER(CXXMethodDecl, isUserProvided) {
  return Node.isUserProvided();
}

/// Matches member expressions that are called with '->' as opposed
/// to '.'.
///
/// Member calls on the implicit this pointer match as called with '->'.
///
/// Given
/// \code
///   class Y {
///     void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; }
///     template <class T> void f() { this->f<T>(); f<T>(); }
///     int a;
///     static int b;
///   };
///   template <class T>
///   class Z {
///     void x() { this->m; }
///   };
/// \endcode
/// memberExpr(isArrow())
///   matches this->x, x, y.x, a, this->b
/// cxxDependentScopeMemberExpr(isArrow())
///   matches this->m
/// unresolvedMemberExpr(isArrow())
///   matches this->f<T>, f<T>
AST_POLYMORPHIC_MATCHER(
    isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                             CXXDependentScopeMemberExpr)) {
  return Node.isArrow();
}

/// Matches QualType nodes that are of integer type.
///
/// Given
/// \code
///   void a(int);
///   void b(long);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isInteger())))
/// matches "a(int)", "b(long)", but not "c(double)".
AST_MATCHER(QualType, isInteger) {
    return Node->isIntegerType();
}

/// Matches QualType nodes that are of unsigned integer type.
///
/// Given
/// \code
///   void a(int);
///   void b(unsigned long);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isUnsignedInteger())))
/// matches "b(unsigned long)", but not "a(int)" and "c(double)".
AST_MATCHER(QualType, isUnsignedInteger) {
    return Node->isUnsignedIntegerType();
}

/// Matches QualType nodes that are of signed integer type.
///
/// Given
/// \code
///   void a(int);
///   void b(unsigned long);
///   void c(double);
/// \endcode
/// functionDecl(hasAnyParameter(hasType(isSignedInteger())))
/// matches "a(int)", but not "b(unsigned long)" and "c(double)".
AST_MATCHER(QualType, isSignedInteger) {
    return Node->isSignedIntegerType();
}

/// Matches QualType nodes that are of character type.
/// /// Given /// \code /// void a(char); /// void b(wchar_t); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isAnyCharacter()))) /// matches "a(char)", "b(wchar_t)", but not "c(double)". AST_MATCHER(QualType, isAnyCharacter) { return Node->isAnyCharacterType(); } /// Matches QualType nodes that are of any pointer type; this includes /// the Objective-C object pointer type, which is different despite being /// syntactically similar. /// /// Given /// \code /// int *i = nullptr; /// /// @interface Foo /// @end /// Foo *f; /// /// int j; /// \endcode /// varDecl(hasType(isAnyPointer())) /// matches "int *i" and "Foo *f", but not "int j". AST_MATCHER(QualType, isAnyPointer) { return Node->isAnyPointerType(); } /// Matches QualType nodes that are const-qualified, i.e., that /// include "top-level" const. /// /// Given /// \code /// void a(int); /// void b(int const); /// void c(const int); /// void d(const int*); /// void e(int const) {}; /// \endcode /// functionDecl(hasAnyParameter(hasType(isConstQualified()))) /// matches "void b(int const)", "void c(const int)" and /// "void e(int const) {}". It does not match d as there /// is no top-level const on the parameter type "const int *". AST_MATCHER(QualType, isConstQualified) { return Node.isConstQualified(); } /// Matches QualType nodes that are volatile-qualified, i.e., that /// include "top-level" volatile. /// /// Given /// \code /// void a(int); /// void b(int volatile); /// void c(volatile int); /// void d(volatile int*); /// void e(int volatile) {}; /// \endcode /// functionDecl(hasAnyParameter(hasType(isVolatileQualified()))) /// matches "void b(int volatile)", "void c(volatile int)" and /// "void e(int volatile) {}". It does not match d as there /// is no top-level volatile on the parameter type "volatile int *". 
AST_MATCHER(QualType, isVolatileQualified) {
  return Node.isVolatileQualified();
}

/// Matches QualType nodes that have local CV-qualifiers attached to
/// the node, not hidden within a typedef.
///
/// Given
/// \code
///   typedef const int const_int;
///   const_int i;
///   int *const j;
///   int *volatile k;
///   int m;
/// \endcode
/// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k.
/// \c i is const-qualified but the qualifier is not local.
AST_MATCHER(QualType, hasLocalQualifiers) {
  return Node.hasLocalQualifiers();
}

/// Matches a member expression where the member is matched by a
/// given matcher.
///
/// Given
/// \code
///   struct { int first, second; } first, second;
///   int i(second.first);
///   int j(first.second);
/// \endcode
/// memberExpr(member(hasName("first")))
///   matches second.first
///   but not first.second (because the member name there is "second").
AST_MATCHER_P(MemberExpr, member,
              internal::Matcher<ValueDecl>, InnerMatcher) {
  return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder);
}

/// Matches a member expression where the object expression is matched by a
/// given matcher. Implicit object expressions are included; that is, it matches
/// use of implicit `this`.
///
/// Given
/// \code
///   struct X {
///     int m;
///     int f(X x) { x.m; return m; }
///   };
/// \endcode
/// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X")))))
///   matches `x.m`, but not `m`; however,
/// memberExpr(hasObjectExpression(hasType(pointsTo(
///     cxxRecordDecl(hasName("X"))))))
///   matches `m` (aka. `this->m`), but not `x.m`.
AST_POLYMORPHIC_MATCHER_P(
    hasObjectExpression,
    AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr,
                                    CXXDependentScopeMemberExpr),
    internal::Matcher<Expr>, InnerMatcher) {
  // For the two dependent member-expression kinds, an implicit access has
  // no base expression to match against, so bail out before getBase().
  if (const auto *E = dyn_cast<UnresolvedMemberExpr>(&Node))
    if (E->isImplicitAccess())
      return false;
  if (const auto *E = dyn_cast<CXXDependentScopeMemberExpr>(&Node))
    if (E->isImplicitAccess())
      return false;
  return InnerMatcher.matches(*Node.getBase(), Finder, Builder);
}

/// Matches any using shadow declaration.
///
/// Given
/// \code
///   namespace X { void b(); }
///   using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasName("b"))))
///   matches \code using X::b \endcode
AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl,
              internal::Matcher<UsingShadowDecl>, InnerMatcher) {
  return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(),
                                    Node.shadow_end(), Finder, Builder);
}

/// Matches a using shadow declaration where the target declaration is
/// matched by the given matcher.
///
/// Given
/// \code
///   namespace X { int a; void b(); }
///   using X::a;
///   using X::b;
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl())))
///   matches \code using X::b \endcode
///   but not \code using X::a \endcode
AST_MATCHER_P(UsingShadowDecl, hasTargetDecl,
              internal::Matcher<NamedDecl>, InnerMatcher) {
  return InnerMatcher.matches(*Node.getTargetDecl(), Finder, Builder);
}

/// Matches template instantiations of function, class, or static
/// member variable template instantiations.
///
/// Given
/// \code
///   template <typename T> class X {}; class A {}; X<A> x;
/// \endcode
/// or
/// \code
///   template <typename T> class X {}; class A {}; template class X<A>;
/// \endcode
/// or
/// \code
///   template <typename T> class X {}; class A {}; extern template class X<A>;
/// \endcode
/// cxxRecordDecl(hasName("::X"), isTemplateInstantiation())
///   matches the template instantiation of X<A>.
/// /// But given /// \code /// template <typename T> class X {}; class A {}; /// template <> class X<A> {}; X<A> x; /// \endcode /// cxxRecordDecl(hasName("::X"), isTemplateInstantiation()) /// does not match, as X<A> is an explicit template specialization. /// /// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl> AST_POLYMORPHIC_MATCHER(isTemplateInstantiation, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl, CXXRecordDecl)) { return (Node.getTemplateSpecializationKind() == TSK_ImplicitInstantiation || Node.getTemplateSpecializationKind() == TSK_ExplicitInstantiationDefinition || Node.getTemplateSpecializationKind() == TSK_ExplicitInstantiationDeclaration); } /// Matches declarations that are template instantiations or are inside /// template instantiations. /// /// Given /// \code /// template<typename T> void A(T t) { T i; } /// A(0); /// A(0U); /// \endcode /// functionDecl(isInstantiated()) /// matches 'A(int) {...};' and 'A(unsigned) {...}'. AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) { auto IsInstantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()), functionDecl(isTemplateInstantiation()))); return decl(anyOf(IsInstantiation, hasAncestor(IsInstantiation))); } /// Matches statements inside of a template instantiation. /// /// Given /// \code /// int j; /// template<typename T> void A(T t) { T i; j += 42;} /// A(0); /// A(0U); /// \endcode /// declStmt(isInTemplateInstantiation()) /// matches 'int i;' and 'unsigned i'. /// unless(stmt(isInTemplateInstantiation())) /// will NOT match j += 42; as it's shared between the template definition and /// instantiation. AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) { return stmt( hasAncestor(decl(anyOf(cxxRecordDecl(isTemplateInstantiation()), functionDecl(isTemplateInstantiation()))))); } /// Matches explicit template specializations of function, class, or /// static member variable template instantiations. 
/// /// Given /// \code /// template<typename T> void A(T t) { } /// template<> void A(int N) { } /// \endcode /// functionDecl(isExplicitTemplateSpecialization()) /// matches the specialization A<int>(). /// /// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl> AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl, CXXRecordDecl)) { return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization); } /// Matches \c TypeLocs for which the given inner /// QualType-matcher matches. AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc, internal::Matcher<QualType>, InnerMatcher, 0) { return internal::BindableMatcher<TypeLoc>( new internal::TypeLocTypeMatcher(InnerMatcher)); } /// Matches type \c bool. /// /// Given /// \code /// struct S { bool func(); }; /// \endcode /// functionDecl(returns(booleanType())) /// matches "bool func();" AST_MATCHER(Type, booleanType) { return Node.isBooleanType(); } /// Matches type \c void. /// /// Given /// \code /// struct S { void func(); }; /// \endcode /// functionDecl(returns(voidType())) /// matches "void func();" AST_MATCHER(Type, voidType) { return Node.isVoidType(); } template <typename NodeType> using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>; /// Matches builtin Types. /// /// Given /// \code /// struct A {}; /// A a; /// int b; /// float c; /// bool d; /// \endcode /// builtinType() /// matches "int b", "float c" and "bool d" extern const AstTypeMatcher<BuiltinType> builtinType; /// Matches all kinds of arrays. /// /// Given /// \code /// int a[] = { 2, 3 }; /// int b[4]; /// void f() { int c[a[0]]; } /// \endcode /// arrayType() /// matches "int a[]", "int b[4]" and "int c[a[0]]"; extern const AstTypeMatcher<ArrayType> arrayType; /// Matches C99 complex types. 
/// /// Given /// \code /// _Complex float f; /// \endcode /// complexType() /// matches "_Complex float f" extern const AstTypeMatcher<ComplexType> complexType; /// Matches any real floating-point type (float, double, long double). /// /// Given /// \code /// int i; /// float f; /// \endcode /// realFloatingPointType() /// matches "float f" but not "int i" AST_MATCHER(Type, realFloatingPointType) { return Node.isRealFloatingType(); } /// Matches arrays and C99 complex types that have a specific element /// type. /// /// Given /// \code /// struct A {}; /// A a[7]; /// int b[7]; /// \endcode /// arrayType(hasElementType(builtinType())) /// matches "int b[7]" /// /// Usable as: Matcher<ArrayType>, Matcher<ComplexType> AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement, AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType, ComplexType)); /// Matches C arrays with a specified constant size. /// /// Given /// \code /// void() { /// int a[2]; /// int b[] = { 2, 3 }; /// int c[b[0]]; /// } /// \endcode /// constantArrayType() /// matches "int a[2]" extern const AstTypeMatcher<ConstantArrayType> constantArrayType; /// Matches nodes that have the specified size. /// /// Given /// \code /// int a[42]; /// int b[2 * 21]; /// int c[41], d[43]; /// char *s = "abcd"; /// wchar_t *ws = L"abcd"; /// char *w = "a"; /// \endcode /// constantArrayType(hasSize(42)) /// matches "int a[42]" and "int b[2 * 21]" /// stringLiteral(hasSize(4)) /// matches "abcd", L"abcd" AST_POLYMORPHIC_MATCHER_P(hasSize, AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType, StringLiteral), unsigned, N) { return internal::HasSizeMatcher<NodeType>::hasSize(Node, N); } /// Matches C++ arrays whose size is a value-dependent expression. 
/// /// Given /// \code /// template<typename T, int Size> /// class array { /// T data[Size]; /// }; /// \endcode /// dependentSizedArrayType /// matches "T data[Size]" extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType; /// Matches C arrays with unspecified size. /// /// Given /// \code /// int a[] = { 2, 3 }; /// int b[42]; /// void f(int c[]) { int d[a[0]]; }; /// \endcode /// incompleteArrayType() /// matches "int a[]" and "int c[]" extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType; /// Matches C arrays with a specified size that is not an /// integer-constant-expression. /// /// Given /// \code /// void f() { /// int a[] = { 2, 3 } /// int b[42]; /// int c[a[0]]; /// } /// \endcode /// variableArrayType() /// matches "int c[a[0]]" extern const AstTypeMatcher<VariableArrayType> variableArrayType; /// Matches \c VariableArrayType nodes that have a specific size /// expression. /// /// Given /// \code /// void f(int b) { /// int a[b]; /// } /// \endcode /// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to( /// varDecl(hasName("b"))))))) /// matches "int a[b]" AST_MATCHER_P(VariableArrayType, hasSizeExpr, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder); } /// Matches atomic types. /// /// Given /// \code /// _Atomic(int) i; /// \endcode /// atomicType() /// matches "_Atomic(int) i" extern const AstTypeMatcher<AtomicType> atomicType; /// Matches atomic types with a specific value type. /// /// Given /// \code /// _Atomic(int) i; /// _Atomic(float) f; /// \endcode /// atomicType(hasValueType(isInteger())) /// matches "_Atomic(int) i" /// /// Usable as: Matcher<AtomicType> AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue, AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType)); /// Matches types nodes representing C++11 auto types. 
/// /// Given: /// \code /// auto n = 4; /// int v[] = { 2, 3 } /// for (auto i : v) { } /// \endcode /// autoType() /// matches "auto n" and "auto i" extern const AstTypeMatcher<AutoType> autoType; /// Matches types nodes representing C++11 decltype(<expr>) types. /// /// Given: /// \code /// short i = 1; /// int j = 42; /// decltype(i + j) result = i + j; /// \endcode /// decltypeType() /// matches "decltype(i + j)" extern const AstTypeMatcher<DecltypeType> decltypeType; /// Matches \c AutoType nodes where the deduced type is a specific type. /// /// Note: There is no \c TypeLoc for the deduced type and thus no /// \c getDeducedLoc() matcher. /// /// Given /// \code /// auto a = 1; /// auto b = 2.0; /// \endcode /// autoType(hasDeducedType(isInteger())) /// matches "auto a" /// /// Usable as: Matcher<AutoType> AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType, AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType)); /// Matches \c DecltypeType nodes to find out the underlying type. /// /// Given /// \code /// decltype(1) a = 1; /// decltype(2.0) b = 2.0; /// \endcode /// decltypeType(hasUnderlyingType(isInteger())) /// matches the type of "a" /// /// Usable as: Matcher<DecltypeType> AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType, AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType)); /// Matches \c FunctionType nodes. /// /// Given /// \code /// int (*f)(int); /// void g(); /// \endcode /// functionType() /// matches "int (*f)(int)" and the type of "g". extern const AstTypeMatcher<FunctionType> functionType; /// Matches \c FunctionProtoType nodes. /// /// Given /// \code /// int (*f)(int); /// void g(); /// \endcode /// functionProtoType() /// matches "int (*f)(int)" and the type of "g" in C++ mode. /// In C mode, "g" is not matched because it does not contain a prototype. extern const AstTypeMatcher<FunctionProtoType> functionProtoType; /// Matches \c ParenType nodes. 
/// /// Given /// \code /// int (*ptr_to_array)[4]; /// int *array_of_ptrs[4]; /// \endcode /// /// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not /// \c array_of_ptrs. extern const AstTypeMatcher<ParenType> parenType; /// Matches \c ParenType nodes where the inner type is a specific type. /// /// Given /// \code /// int (*ptr_to_array)[4]; /// int (*ptr_to_func)(int); /// \endcode /// /// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches /// \c ptr_to_func but not \c ptr_to_array. /// /// Usable as: Matcher<ParenType> AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType, AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType)); /// Matches block pointer types, i.e. types syntactically represented as /// "void (^)(int)". /// /// The \c pointee is always required to be a \c FunctionType. extern const AstTypeMatcher<BlockPointerType> blockPointerType; /// Matches member pointer types. /// Given /// \code /// struct A { int i; } /// A::* ptr = A::i; /// \endcode /// memberPointerType() /// matches "A::* ptr" extern const AstTypeMatcher<MemberPointerType> memberPointerType; /// Matches pointer types, but does not match Objective-C object pointer /// types. /// /// Given /// \code /// int *a; /// int &b = *a; /// int c = 5; /// /// @interface Foo /// @end /// Foo *f; /// \endcode /// pointerType() /// matches "int *a", but does not match "Foo *f". extern const AstTypeMatcher<PointerType> pointerType; /// Matches an Objective-C object pointer type, which is different from /// a pointer type, despite being syntactically similar. /// /// Given /// \code /// int *a; /// /// @interface Foo /// @end /// Foo *f; /// \endcode /// pointerType() /// matches "Foo *f", but does not match "int *a". extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType; /// Matches both lvalue and rvalue reference types. 
/// /// Given /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f. extern const AstTypeMatcher<ReferenceType> referenceType; /// Matches lvalue reference types. /// /// Given: /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is /// matched since the type is deduced as int& by reference collapsing rules. extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType; /// Matches rvalue reference types. /// /// Given: /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not /// matched as it is deduced to int& by reference collapsing rules. extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType; /// Narrows PointerType (and similar) matchers to those where the /// \c pointee matches a given matcher. /// /// Given /// \code /// int *a; /// int const *b; /// float const *f; /// \endcode /// pointerType(pointee(isConstQualified(), isInteger())) /// matches "int const *b" /// /// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>, /// Matcher<PointerType>, Matcher<ReferenceType> AST_TYPELOC_TRAVERSE_MATCHER_DECL( pointee, getPointee, AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType, PointerType, ReferenceType)); /// Matches typedef types. /// /// Given /// \code /// typedef int X; /// \endcode /// typedefType() /// matches "typedef int X" extern const AstTypeMatcher<TypedefType> typedefType; /// Matches enum types. 
/// /// Given /// \code /// enum C { Green }; /// enum class S { Red }; /// /// C c; /// S s; /// \endcode // /// \c enumType() matches the type of the variable declarations of both \c c and /// \c s. extern const AstTypeMatcher<EnumType> enumType; /// Matches template specialization types. /// /// Given /// \code /// template <typename T> /// class C { }; /// /// template class C<int>; // A /// C<char> var; // B /// \endcode /// /// \c templateSpecializationType() matches the type of the explicit /// instantiation in \c A and the type of the variable declaration in \c B. extern const AstTypeMatcher<TemplateSpecializationType> templateSpecializationType; /// Matches C++17 deduced template specialization types, e.g. deduced class /// template types. /// /// Given /// \code /// template <typename T> /// class C { public: C(T); }; /// /// C c(123); /// \endcode /// \c deducedTemplateSpecializationType() matches the type in the declaration /// of the variable \c c. extern const AstTypeMatcher<DeducedTemplateSpecializationType> deducedTemplateSpecializationType; /// Matches types nodes representing unary type transformations. /// /// Given: /// \code /// typedef __underlying_type(T) type; /// \endcode /// unaryTransformType() /// matches "__underlying_type(T)" extern const AstTypeMatcher<UnaryTransformType> unaryTransformType; /// Matches record types (e.g. structs, classes). /// /// Given /// \code /// class C {}; /// struct S {}; /// /// C c; /// S s; /// \endcode /// /// \c recordType() matches the type of the variable declarations of both \c c /// and \c s. extern const AstTypeMatcher<RecordType> recordType; /// Matches tag types (record and enum types). /// /// Given /// \code /// enum E {}; /// class C {}; /// /// E e; /// C c; /// \endcode /// /// \c tagType() matches the type of the variable declarations of both \c e /// and \c c. 
extern const AstTypeMatcher<TagType> tagType; /// Matches types specified with an elaborated type keyword or with a /// qualified name. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// class C {}; /// /// class C c; /// N::M::D d; /// \endcode /// /// \c elaboratedType() matches the type of the variable declarations of both /// \c c and \c d. extern const AstTypeMatcher<ElaboratedType> elaboratedType; /// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier, /// matches \c InnerMatcher if the qualifier exists. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// N::M::D d; /// \endcode /// /// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N")))) /// matches the type of the variable declaration of \c d. AST_MATCHER_P(ElaboratedType, hasQualifier, internal::Matcher<NestedNameSpecifier>, InnerMatcher) { if (const NestedNameSpecifier *Qualifier = Node.getQualifier()) return InnerMatcher.matches(*Qualifier, Finder, Builder); return false; } /// Matches ElaboratedTypes whose named type matches \c InnerMatcher. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// N::M::D d; /// \endcode /// /// \c elaboratedType(namesType(recordType( /// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable /// declaration of \c d. AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getNamedType(), Finder, Builder); } /// Matches types that represent the result of substituting a type for a /// template type parameter. 
/// /// Given /// \code /// template <typename T> /// void F(T t) { /// int i = 1 + t; /// } /// \endcode /// /// \c substTemplateTypeParmType() matches the type of 't' but not '1' extern const AstTypeMatcher<SubstTemplateTypeParmType> substTemplateTypeParmType; /// Matches template type parameter substitutions that have a replacement /// type that matches the provided matcher. /// /// Given /// \code /// template <typename T> /// double F(T t); /// int i; /// double j = F(i); /// \endcode /// /// \c substTemplateTypeParmType(hasReplacementType(type())) matches int AST_TYPE_TRAVERSE_MATCHER( hasReplacementType, getReplacementType, AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType)); /// Matches template type parameter types. /// /// Example matches T, but not int. /// (matcher = templateTypeParmType()) /// \code /// template <typename T> void f(int i); /// \endcode extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType; /// Matches injected class name types. /// /// Example matches S s, but not S<T> s. /// (matcher = parmVarDecl(hasType(injectedClassNameType()))) /// \code /// template <typename T> struct S { /// void f(S s); /// void g(S<T> s); /// }; /// \endcode extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType; /// Matches decayed type /// Example matches i[] in declaration of f. /// (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType()))))) /// Example matches i[1]. /// (matcher = expr(hasType(decayedType(hasDecayedType(pointerType()))))) /// \code /// void f(int i[]) { /// i[1] = 0; /// } /// \endcode extern const AstTypeMatcher<DecayedType> decayedType; /// Matches the decayed type, whos decayed type matches \c InnerMatcher AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>, InnerType) { return InnerType.matches(Node.getDecayedType(), Finder, Builder); } /// Matches declarations whose declaration context, interpreted as a /// Decl, matches \c InnerMatcher. 
/// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// \endcode /// /// \c cxxRcordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the /// declaration of \c class \c D. AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) { const DeclContext *DC = Node.getDeclContext(); if (!DC) return false; return InnerMatcher.matches(*Decl::castFromDeclContext(DC), Finder, Builder); } /// Matches nested name specifiers. /// /// Given /// \code /// namespace ns { /// struct A { static void f(); }; /// void A::f() {} /// void g() { A::f(); } /// } /// ns::A a; /// \endcode /// nestedNameSpecifier() /// matches "ns::" and both "A::" extern const internal::VariadicAllOfMatcher<NestedNameSpecifier> nestedNameSpecifier; /// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc. extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc> nestedNameSpecifierLoc; /// Matches \c NestedNameSpecifierLocs for which the given inner /// NestedNameSpecifier-matcher matches. AST_MATCHER_FUNCTION_P_OVERLOAD( internal::BindableMatcher<NestedNameSpecifierLoc>, loc, internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) { return internal::BindableMatcher<NestedNameSpecifierLoc>( new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>( InnerMatcher)); } /// Matches nested name specifiers that specify a type matching the /// given \c QualType matcher without qualifiers. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifier(specifiesType( /// hasDeclaration(cxxRecordDecl(hasName("A"))) /// )) /// matches "A::" AST_MATCHER_P(NestedNameSpecifier, specifiesType, internal::Matcher<QualType>, InnerMatcher) { if (!Node.getAsType()) return false; return InnerMatcher.matches(QualType(Node.getAsType(), 0), Finder, Builder); } /// Matches nested name specifier locs that specify a type matching the /// given \c TypeLoc. 
/// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type( /// hasDeclaration(cxxRecordDecl(hasName("A"))))))) /// matches "A::" AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc, internal::Matcher<TypeLoc>, InnerMatcher) { return Node && Node.getNestedNameSpecifier()->getAsType() && InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder); } /// Matches on the prefix of a \c NestedNameSpecifier. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A")))) and /// matches "A::" AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix, internal::Matcher<NestedNameSpecifier>, InnerMatcher, 0) { const NestedNameSpecifier *NextNode = Node.getPrefix(); if (!NextNode) return false; return InnerMatcher.matches(*NextNode, Finder, Builder); } /// Matches on the prefix of a \c NestedNameSpecifierLoc. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A"))))) /// matches "A::" AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix, internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher, 1) { NestedNameSpecifierLoc NextNode = Node.getPrefix(); if (!NextNode) return false; return InnerMatcher.matches(NextNode, Finder, Builder); } /// Matches nested name specifiers that specify a namespace matching the /// given namespace matcher. /// /// Given /// \code /// namespace ns { struct A {}; } /// ns::A a; /// \endcode /// nestedNameSpecifier(specifiesNamespace(hasName("ns"))) /// matches "ns::" AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace, internal::Matcher<NamespaceDecl>, InnerMatcher) { if (!Node.getAsNamespace()) return false; return InnerMatcher.matches(*Node.getAsNamespace(), Finder, Builder); } /// Overloads for the \c equalsNode matcher. 
/// FIXME: Implement for other node types. /// @{ /// Matches if a node equals another node. /// /// \c Decl has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) { return &Node == Other; } /// Matches if a node equals another node. /// /// \c Stmt has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) { return &Node == Other; } /// Matches if a node equals another node. /// /// \c Type has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) { return &Node == Other; } /// @} /// Matches each case or default statement belonging to the given switch /// statement. This matcher may produce multiple matches. /// /// Given /// \code /// switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } } /// \endcode /// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s") /// matches four times, with "c" binding each of "case 1:", "case 2:", /// "case 3:" and "case 4:", and "s" respectively binding "switch (1)", /// "switch (1)", "switch (2)" and "switch (2)". AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>, InnerMatcher) { BoundNodesTreeBuilder Result; // FIXME: getSwitchCaseList() does not necessarily guarantee a stable // iteration order. We should use the more general iterating matchers once // they are capable of expressing this matcher (for example, it should ignore // case statements belonging to nested switch statements). bool Matched = false; for (const SwitchCase *SC = Node.getSwitchCaseList(); SC; SC = SC->getNextSwitchCase()) { BoundNodesTreeBuilder CaseBuilder(*Builder); bool CaseMatched = InnerMatcher.matches(*SC, Finder, &CaseBuilder); if (CaseMatched) { Matched = true; Result.addMatch(CaseBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches each constructor initializer in a constructor definition. 
/// /// Given /// \code /// class A { A() : i(42), j(42) {} int i; int j; }; /// \endcode /// cxxConstructorDecl(forEachConstructorInitializer( /// forField(decl().bind("x")) /// )) /// will trigger two matches, binding for 'i' and 'j' respectively. AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer, internal::Matcher<CXXCtorInitializer>, InnerMatcher) { BoundNodesTreeBuilder Result; bool Matched = false; for (const auto *I : Node.inits()) { BoundNodesTreeBuilder InitBuilder(*Builder); if (InnerMatcher.matches(*I, Finder, &InitBuilder)) { Matched = true; Result.addMatch(InitBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches constructor declarations that are copy constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3. AST_MATCHER(CXXConstructorDecl, isCopyConstructor) { return Node.isCopyConstructor(); } /// Matches constructor declarations that are move constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2. AST_MATCHER(CXXConstructorDecl, isMoveConstructor) { return Node.isMoveConstructor(); } /// Matches constructor declarations that are default constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3. AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) { return Node.isDefaultConstructor(); } /// Matches constructors that delegate to another constructor. 
/// /// Given /// \code /// struct S { /// S(); // #1 /// S(int) {} // #2 /// S(S &&) : S() {} // #3 /// }; /// S::S() : S(0) {} // #4 /// \endcode /// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not /// #1 or #2. AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) { return Node.isDelegatingConstructor(); } /// Matches constructor, conversion function, and deduction guide declarations /// that have an explicit specifier if this explicit specifier is resolved to /// true. /// /// Given /// \code /// template<bool b> /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// explicit(false) S(bool) // # 7 /// explicit(true) S(char) // # 8 /// explicit(b) S(S) // # 9 /// }; /// S(int) -> S<true> // #5 /// explicit S(double) -> S<false> // #6 /// \endcode /// cxxConstructorDecl(isExplicit()) will match #2 and #8, but not #1, #7 or #9. /// cxxConversionDecl(isExplicit()) will match #4, but not #3. /// cxxDeductionGuideDecl(isExplicit()) will match #6, but not #5. AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES( CXXConstructorDecl, CXXConversionDecl, CXXDeductionGuideDecl)) { return Node.isExplicit(); } /// Matches the expression in an explicit specifier if present in the given /// declaration. /// /// Given /// \code /// template<bool b> /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// explicit(false) S(bool) // # 7 /// explicit(true) S(char) // # 8 /// explicit(b) S(S) // # 9 /// }; /// S(int) -> S<true> // #5 /// explicit S(double) -> S<false> // #6 /// \endcode /// cxxConstructorDecl(hasExplicitSpecifier(constantExpr())) will match #7, #8 and #9, but not #1 or #2. /// cxxConversionDecl(hasExplicitSpecifier(constantExpr())) will not match #3 or #4. /// cxxDeductionGuideDecl(hasExplicitSpecifier(constantExpr())) will not match #5 or #6. 
AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>, InnerMatcher) { ExplicitSpecifier ES = ExplicitSpecifier::getFromDecl(&Node); if (!ES.getExpr()) return false; return InnerMatcher.matches(*ES.getExpr(), Finder, Builder); } /// Matches function and namespace declarations that are marked with /// the inline keyword. /// /// Given /// \code /// inline void f(); /// void g(); /// namespace n { /// inline namespace m {} /// } /// \endcode /// functionDecl(isInline()) will match ::f(). /// namespaceDecl(isInline()) will match n::m. AST_POLYMORPHIC_MATCHER(isInline, AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl, FunctionDecl)) { // This is required because the spelling of the function used to determine // whether inline is specified or not differs between the polymorphic types. if (const auto *FD = dyn_cast<FunctionDecl>(&Node)) return FD->isInlineSpecified(); else if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node)) return NSD->isInline(); llvm_unreachable("Not a valid polymorphic type"); } /// Matches anonymous namespace declarations. /// /// Given /// \code /// namespace n { /// namespace {} // #1 /// } /// \endcode /// namespaceDecl(isAnonymous()) will match #1 but not ::n. AST_MATCHER(NamespaceDecl, isAnonymous) { return Node.isAnonymousNamespace(); } /// Matches declarations in the namespace `std`, but not in nested namespaces. /// /// Given /// \code /// class vector {}; /// namespace foo { /// class vector {}; /// namespace std { /// class vector {}; /// } /// } /// namespace std { /// inline namespace __1 { /// class vector {}; // #1 /// namespace experimental { /// class vector {}; /// } /// } /// } /// \endcode /// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1. AST_MATCHER(Decl, isInStdNamespace) { return Node.isInStdNamespace(); } /// If the given case statement does not use the GNU case range /// extension, matches the constant given in the statement. 
/// /// Given /// \code /// switch (1) { case 1: case 1+1: case 3 ... 4: ; } /// \endcode /// caseStmt(hasCaseConstant(integerLiteral())) /// matches "case 1:" AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>, InnerMatcher) { if (Node.getRHS()) return false; return InnerMatcher.matches(*Node.getLHS(), Finder, Builder); } /// Matches declaration that has a given attribute. /// /// Given /// \code /// __attribute__((device)) void f() { ... } /// \endcode /// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of /// f. If the matcher is used from clang-query, attr::Kind parameter should be /// passed as a quoted string. e.g., hasAttr("attr::CUDADevice"). AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) { for (const auto *Attr : Node.attrs()) { if (Attr->getKind() == AttrKind) return true; } return false; } /// Matches the return value expression of a return statement /// /// Given /// \code /// return a + b; /// \endcode /// hasReturnValue(binaryOperator()) /// matches 'return a + b' /// with binaryOperator() /// matching 'a + b' AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>, InnerMatcher) { if (const auto *RetValue = Node.getRetValue()) return InnerMatcher.matches(*RetValue, Finder, Builder); return false; } /// Matches CUDA kernel call expression. /// /// Example matches, /// \code /// kernel<<<i,j>>>(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr> cudaKernelCallExpr; /// Matches expressions that resolve to a null pointer constant, such as /// GNU's __null, C++11's nullptr, or C's NULL macro. /// /// Given: /// \code /// void *v1 = NULL; /// void *v2 = nullptr; /// void *v3 = __null; // GNU extension /// char *cp = (char *)0; /// int *ip = 0; /// int i = 0; /// \endcode /// expr(nullPointerConstant()) /// matches the initializer for v1, v2, v3, cp, and ip. Does not match the /// initializer for i. 
AST_MATCHER(Expr, nullPointerConstant) {
  return Node.isNullPointerConstant(Finder->getASTContext(),
                                    Expr::NPC_ValueDependentIsNull);
}

/// Matches declaration of the function the statement belongs to
///
/// Given:
/// \code
/// F& operator=(const F& o) {
///   std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; });
///   return *this;
/// }
/// \endcode
/// returnStmt(forFunction(hasName("operator=")))
///   matches 'return *this'
///   but does not match 'return v > 0'
AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>,
              InnerMatcher) {
  const auto &Parents = Finder->getASTContext().getParents(Node);

  llvm::SmallVector<DynTypedNode, 8> Stack(Parents.begin(), Parents.end());
  while(!Stack.empty()) {
    const auto &CurNode = Stack.back();
    Stack.pop_back();
    if(const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) {
      if(InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) {
        return true;
      }
    } else if(const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) {
      if(InnerMatcher.matches(*LambdaExprNode->getCallOperator(),
                              Finder, Builder)) {
        return true;
      }
    } else {
      for(const auto &Parent: Finder->getASTContext().getParents(CurNode))
        Stack.push_back(Parent);
    }
  }
  return false;
}

/// Matches a declaration that has external formal linkage.
///
/// Example matches only z (matcher = varDecl(hasExternalFormalLinkage()))
/// \code
/// void f() {
///   int x;
///   static int y;
/// }
/// int z;
/// \endcode
///
/// Example matches f() because it has external formal linkage despite being
/// unique to the translation unit as though it has internal linkage
/// (matcher = functionDecl(hasExternalFormalLinkage()))
///
/// \code
/// namespace {
/// void f() {}
/// }
/// \endcode
AST_MATCHER(NamedDecl, hasExternalFormalLinkage) {
  return Node.hasExternalFormalLinkage();
}

/// Matches a declaration that has default arguments.
/// /// Example matches y (matcher = parmVarDecl(hasDefaultArgument())) /// \code /// void x(int val) {} /// void y(int val = 0) {} /// \endcode /// /// Deprecated. Use hasInitializer() instead to be able to /// match on the contents of the default argument. For example: /// /// \code /// void x(int val = 7) {} /// void y(int val = 42) {} /// \endcode /// parmVarDecl(hasInitializer(integerLiteral(equals(42)))) /// matches the parameter of y /// /// A matcher such as /// parmVarDecl(hasInitializer(anything())) /// is equivalent to parmVarDecl(hasDefaultArgument()). AST_MATCHER(ParmVarDecl, hasDefaultArgument) { return Node.hasDefaultArg(); } /// Matches array new expressions. /// /// Given: /// \code /// MyClass *p1 = new MyClass[10]; /// \endcode /// cxxNewExpr(isArray()) /// matches the expression 'new MyClass[10]'. AST_MATCHER(CXXNewExpr, isArray) { return Node.isArray(); } /// Matches placement new expression arguments. /// /// Given: /// \code /// MyClass *p1 = new (Storage, 16) MyClass(); /// \endcode /// cxxNewExpr(hasPlacementArg(1, integerLiteral(equals(16)))) /// matches the expression 'new (Storage, 16) MyClass()'. AST_MATCHER_P2(CXXNewExpr, hasPlacementArg, unsigned, Index, internal::Matcher<Expr>, InnerMatcher) { return Node.getNumPlacementArgs() > Index && InnerMatcher.matches(*Node.getPlacementArg(Index), Finder, Builder); } /// Matches any placement new expression arguments. /// /// Given: /// \code /// MyClass *p1 = new (Storage) MyClass(); /// \endcode /// cxxNewExpr(hasAnyPlacementArg(anything())) /// matches the expression 'new (Storage, 16) MyClass()'. AST_MATCHER_P(CXXNewExpr, hasAnyPlacementArg, internal::Matcher<Expr>, InnerMatcher) { return llvm::any_of(Node.placement_arguments(), [&](const Expr *Arg) { return InnerMatcher.matches(*Arg, Finder, Builder); }); } /// Matches array new expressions with a given array size. 
/// /// Given: /// \code /// MyClass *p1 = new MyClass[10]; /// \endcode /// cxxNewExpr(hasArraySize(integerLiteral(equals(10)))) /// matches the expression 'new MyClass[10]'. AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) { return Node.isArray() && *Node.getArraySize() && InnerMatcher.matches(**Node.getArraySize(), Finder, Builder); } /// Matches a class declaration that is defined. /// /// Example matches x (matcher = cxxRecordDecl(hasDefinition())) /// \code /// class x {}; /// class y; /// \endcode AST_MATCHER(CXXRecordDecl, hasDefinition) { return Node.hasDefinition(); } /// Matches C++11 scoped enum declaration. /// /// Example matches Y (matcher = enumDecl(isScoped())) /// \code /// enum X {}; /// enum class Y {}; /// \endcode AST_MATCHER(EnumDecl, isScoped) { return Node.isScoped(); } /// Matches a function declared with a trailing return type. /// /// Example matches Y (matcher = functionDecl(hasTrailingReturn())) /// \code /// int X() {} /// auto Y() -> int {} /// \endcode AST_MATCHER(FunctionDecl, hasTrailingReturn) { if (const auto *F = Node.getType()->getAs<FunctionProtoType>()) return F->hasTrailingReturn(); return false; } /// Matches expressions that match InnerMatcher that are possibly wrapped in an /// elidable constructor and other corresponding bookkeeping nodes. /// /// In C++17, elidable copy constructors are no longer being generated in the /// AST as it is not permitted by the standard. They are, however, part of the /// AST in C++14 and earlier. So, a matcher must abstract over these differences /// to work in all language modes. This matcher skips elidable constructor-call /// AST nodes, `ExprWithCleanups` nodes wrapping elidable constructor-calls and /// various implicit nodes inside the constructor calls, all of which will not /// appear in the C++17 AST. 
/// /// Given /// /// \code /// struct H {}; /// H G(); /// void f() { /// H D = G(); /// } /// \endcode /// /// ``varDecl(hasInitializer(ignoringElidableConstructorCall(callExpr())))`` /// matches ``H D = G()`` in C++11 through C++17 (and beyond). AST_MATCHER_P(Expr, ignoringElidableConstructorCall, ast_matchers::internal::Matcher<Expr>, InnerMatcher) { // E tracks the node that we are examining. const Expr *E = &Node; // If present, remove an outer `ExprWithCleanups` corresponding to the // underlying `CXXConstructExpr`. This check won't cover all cases of added // `ExprWithCleanups` corresponding to `CXXConstructExpr` nodes (because the // EWC is placed on the outermost node of the expression, which this may not // be), but, it still improves the coverage of this matcher. if (const auto *CleanupsExpr = dyn_cast<ExprWithCleanups>(&Node)) E = CleanupsExpr->getSubExpr(); if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(E)) { if (CtorExpr->isElidable()) { if (const auto *MaterializeTemp = dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0))) { return InnerMatcher.matches(*MaterializeTemp->getSubExpr(), Finder, Builder); } } } return InnerMatcher.matches(Node, Finder, Builder); } //----------------------------------------------------------------------------// // OpenMP handling. //----------------------------------------------------------------------------// /// Matches any ``#pragma omp`` executable directive. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp taskyield /// \endcode /// /// ``ompExecutableDirective()`` matches ``omp parallel``, /// ``omp parallel default(none)`` and ``omp taskyield``. extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective> ompExecutableDirective; /// Matches standalone OpenMP directives, /// i.e., directives that can't have a structured block. 
/// /// Given /// /// \code /// #pragma omp parallel /// {} /// #pragma omp taskyield /// \endcode /// /// ``ompExecutableDirective(isStandaloneDirective()))`` matches /// ``omp taskyield``. AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) { return Node.isStandaloneDirective(); } /// Matches the structured-block of the OpenMP executable directive /// /// Prerequisite: the executable directive must not be standalone directive. /// If it is, it will never match. /// /// Given /// /// \code /// #pragma omp parallel /// ; /// #pragma omp parallel /// {} /// \endcode /// /// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;`` AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock, internal::Matcher<Stmt>, InnerMatcher) { if (Node.isStandaloneDirective()) return false; // Standalone directives have no structured blocks. return InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder); } /// Matches any clause in an OpenMP directive. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// \endcode /// /// ``ompExecutableDirective(hasAnyClause(anything()))`` matches /// ``omp parallel default(none)``. AST_MATCHER_P(OMPExecutableDirective, hasAnyClause, internal::Matcher<OMPClause>, InnerMatcher) { ArrayRef<OMPClause *> Clauses = Node.clauses(); return matchesFirstInPointerRange(InnerMatcher, Clauses.begin(), Clauses.end(), Finder, Builder); } /// Matches OpenMP ``default`` clause. /// /// Given /// /// \code /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel /// \endcode /// /// ``ompDefaultClause()`` matches ``default(none)`` and ``default(shared)``. extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause> ompDefaultClause; /// Matches if the OpenMP ``default`` clause has ``none`` kind specified. 
/// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// \endcode /// /// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``. AST_MATCHER(OMPDefaultClause, isNoneKind) { return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_none; } /// Matches if the OpenMP ``default`` clause has ``shared`` kind specified. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// \endcode /// /// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``. AST_MATCHER(OMPDefaultClause, isSharedKind) { return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_shared; } /// Matches if the OpenMP directive is allowed to contain the specified OpenMP /// clause kind. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel for /// #pragma omp for /// \endcode /// /// `ompExecutableDirective(isAllowedToContainClause(OMPC_default))`` matches /// ``omp parallel`` and ``omp parallel for``. /// /// If the matcher is use from clang-query, ``OpenMPClauseKind`` parameter /// should be passed as a quoted string. e.g., /// ``isAllowedToContainClauseKind("OMPC_default").`` AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind, OpenMPClauseKind, CKind) { return llvm::omp::isAllowedClauseForDirective( Node.getDirectiveKind(), CKind, Finder->getASTContext().getLangOpts().OpenMP); } //----------------------------------------------------------------------------// // End OpenMP handling. //----------------------------------------------------------------------------// } // namespace ast_matchers } // namespace clang #endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
GB_binop__le_fp64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__le_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_01__le_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_02__le_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_03__le_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__le_fp64)
// A*D function (colscale):         GB (_AxD__le_fp64)
// D*A function (rowscale):         GB (_DxB__le_fp64)
// C+=B function (dense accum):     GB (_Cdense_accumB__le_fp64)
// C+=b function (dense accum):     GB (_Cdense_accumb__le_fp64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__le_fp64)
// C=scalar+B                       GB (_bind1st__le_fp64)
// C=scalar+B'                      GB (_bind1st_tran__le_fp64)
// C=A+scalar                       GB (_bind2nd__le_fp64)
// C=A'+scalar                      GB (_bind2nd_tran__le_fp64)

// C type:   bool
// A type:   double
// B,b type: double
// BinaryOp: cij = (aij <= bij)

// The macros below parameterize the template .c files included further down;
// they must stay in sync with the operator/type table above.

#define GB_ATYPE \
    double

#define GB_BTYPE \
    double

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    double aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    double bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x <= y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LE || GxB_NO_FP64 || GxB_NO_LE_FP64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// LE is not in that list, so this variant is compiled out for this operator.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__le_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__le_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // body compiled out (#if 0): the accum kernel does not apply to this
    // operator; the function still reports success for the dispatcher.
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__le_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // body compiled out (#if 0): scalar-accum kernel not applicable here.
    #if 0
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__le_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__le_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__le_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces declared here are freed by GB_FREE_WORK below, including on
    // the error paths inside the template.
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__le_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__le_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // GB_BINOP_FLIP is 0 for LE, so only this branch is compiled.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__le_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__le_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__le_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    double   x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        double bij = GBX (Bx, p, false) ;
        Cx [p] = (x <= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__le_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double   y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        double aij = GBX (Ax, p, false) ;
        Cx [p] = (aij <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = (x <= aij) ;                      \
}

GrB_Info GB (_bind1st_tran__le_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for subsequent code (same value here since A and B
    // share the type double for this operator)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    double aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = (aij <= y) ;                      \
}

GrB_Info GB (_bind2nd_tran__le_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
core_dlacpy_band.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zlacpy_band.c, normal z -> d, Fri Sep 28 17:38:19 2018
 *
 **/

#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"

/*******************************************************************************
 *
 * @ingroup core_double
 *
 *  plasma_core_dlacpy copies a sub-block A of a band matrix stored in LAPACK's band format
 *  to a corresponding sub-block B of a band matrix in PLASMA's band format
 *
 *******************************************************************************
 *
 * @param[in] it
 *          The row block index of the tile.
 *
 * @param[in] jt
 *          The column block index of the tile.
 *
 * @param[in] m
 *          The number of rows of the matrices A and B.
 *          M >= 0.
 *
 * @param[in] n
 *          The number of columns of the matrices A and B.
 *          N >= 0.
 *
 * @param[in] A
 *          The M-by-N matrix to copy.
 *
 * @param[in] lda
 *          The leading dimension of the array A.
 *          lda >= max(1,M).
 *
 * @param[out] B
 *          The M-by-N copy of the matrix A.
 *          On exit, B = A ONLY in the locations specified by uplo.
 *          Elements of B outside the copied band region are set to zero.
 *
 * @param[in] ldb
 *          The leading dimension of the array B.
 *          ldb >= max(1,M).
 *
 ******************************************************************************/
/* NOTE(review): nb is the tile size and kl/ku the lower/upper bandwidths —
 * inferred from the (it-jt)*nb +/- kl/ku index arithmetic below; confirm
 * against the caller. */
__attribute__((weak))
void plasma_core_dlacpy_lapack2tile_band(plasma_enum_t uplo,
                                  int it, int jt,
                                  int m, int n, int nb, int kl, int ku,
                                  const double *A, int lda,
                                        double *B, int ldb)
{
    int i, j;
    int j_start, j_end;
    /* Column range [j_start, j_end) that intersects the band for this tile;
     * everything outside it is zero-filled. */
    if (uplo == PlasmaGeneral) {
        j_start = 0;
        // pivot back and could fill in
        j_end   = (jt <= it ? n : imin(n, (it-jt)*nb+m+ku+kl+1));
    }
    else if (uplo == PlasmaUpper) {
        j_start = 0;
        j_end   = imin(n, (it-jt)*nb+m+ku+1);
    }
    else {
        j_start = imax(0, (it-jt)*nb-kl);
        j_end   = n;
    }

    /* Zero the columns before the band. */
    for (j = 0; j < j_start; j++) {
        for (i = 0; i < m; i++) {
            B[i + j*ldb] = 0.0;
        }
    }
    for (j = j_start; j < j_end; j++) {
        /* Row range [i_start, i_end) inside the band for column j. */
        int i_start, i_end;
        if (uplo == PlasmaGeneral) {
            i_start = (jt <= it ? 0 : imax(0, (jt-it)*nb+j-ku-kl));
            i_end   = (jt >= it ? m : imin(m, (jt-it)*nb+j+kl+nb+1));
            // +nb because we use dgetrf on panel and pivot back within the panel.
            // so the last tile in panel could fill.
        }
        else if (uplo == PlasmaUpper) {
            i_start = imax(0, (jt-it)*nb+j-ku);
            i_end   = imin(m, (jt-it)*nb+j+1);
        }
        else {
            i_start = imax(0, (jt-it)*nb+j);
            i_end   = imin(m, (jt-it)*nb+j+kl+1);
        }
        /* Zero above the band, copy the band, zero below the band. */
        for (i = 0; i < i_start; i++) {
            B[i + j*ldb] = 0.0;
        }
        for (i = i_start; i < i_end; i++) {
            B[i + j*ldb] = A[i + j*lda];
        }
        for (i = i_end; i < m; i++) {
            B[i + j*ldb] = 0.0;
        }
    }
    /* Zero the columns after the band. */
    for (j = j_end; j < n; j++) {
        for (i = 0; i < m; i++) {
            B[i + j*ldb] = 0.0;
        }
    }
}

/******************************************************************************/
/* OpenMP task wrapper: same copy, scheduled as a task that reads A and
 * writes B (dependences expressed on the full arrays). */
void plasma_core_omp_dlacpy_lapack2tile_band(plasma_enum_t uplo,
                                      int it, int jt,
                                      int m, int n, int nb, int kl, int ku,
                                      const double *A, int lda,
                                            double *B, int ldb)
{
    #pragma omp task depend(in:A[0:lda*n]) \
                     depend(out:B[0:ldb*n])
    plasma_core_dlacpy_lapack2tile_band(uplo,
                                 it, jt,
                                 m, n, nb, kl, ku,
                                 A, lda,
                                 B, ldb);
}

/*******************************************************************************
 *
 * @ingroup core_double
 *
 *  plasma_core_dlacpy copies all or part of a two-dimensional matrix A to another
 *  matrix B
 *
 *******************************************************************************
 *
 * @param[in] it
 *          The row block index of the tile.
 *
 * @param[in] jt
 *          The column block index of the tile.
 *
 * @param[in] m
 *          The number of rows of the matrices A and B.
 *          m >= 0.
 *
 * @param[in] n
 *          The number of columns of the matrices A and B.
 *          n >= 0.
 *
 * @param[in] A
 *          The m-by-n matrix to copy.
 *
 * @param[in] lda
 *          The leading dimension of the array A.
 *          lda >= max(1, m).
 *
 * @param[out] B
 *          The m-by-n copy of the matrix A.
 *          On exit, B = A ONLY in the locations specified by uplo.
 *
 * @param[in] ldb
 *          The leading dimension of the array B.
 *          ldb >= max(1, m).
 *
 ******************************************************************************/
/* Inverse of lapack2tile above: copies the band region of tile B back into
 * LAPACK band storage A.  Unlike lapack2tile, the out-of-band entries of A
 * are left untouched (no zero-fill). */
__attribute__((weak))
void plasma_core_dlacpy_tile2lapack_band(plasma_enum_t uplo,
                                  int it, int jt,
                                  int m, int n, int nb, int kl, int ku,
                                  const double *B, int ldb,
                                        double *A, int lda)
{
    int i, j;
    int j_start, j_end;
    /* Same band-intersection column range as lapack2tile. */
    if (uplo == PlasmaGeneral) {
        j_start = 0;
        // pivot back and could fill in
        j_end   = (jt <= it ? n : imin(n, (it-jt)*nb+m+ku+kl+1));
    }
    else if (uplo == PlasmaUpper) {
        j_start = 0;
        j_end   = imin(n, (it-jt)*nb+m+ku+1);
    }
    else {
        j_start = imax(0, (it-jt)*nb-kl);
        j_end   = n;
    }

    for (j = j_start; j < j_end; j++) {
        int i_start, i_end;
        if (uplo == PlasmaGeneral) {
            i_start = (jt <= it ? 0 : imax(0, (jt-it)*nb+j-ku-kl));
            i_end   = (jt >= it ? m : imin(m, (jt-it)*nb+j+kl+nb+1));
            // +nb because we use dgetrf on panel and pivot back within the panel.
            // so the last tile in panel could fill.
        }
        else if (uplo == PlasmaUpper) {
            i_start = imax(0, (jt-it)*nb+j-ku);
            i_end   = imin(m, (jt-it)*nb+j+1);
        }
        else {
            i_start = imax(0, (jt-it)*nb+j);
            i_end   = imin(m, (jt-it)*nb+j+kl+1);
        }
        for (i = i_start; i < i_end; i++) {
            A[i + j*lda] = B[i + j*ldb];
        }
    }
}

/******************************************************************************/
/* OpenMP task wrapper for the tile-to-LAPACK copy above. */
void plasma_core_omp_dlacpy_tile2lapack_band(plasma_enum_t uplo,
                                      int it, int jt,
                                      int m, int n, int nb, int kl, int ku,
                                      const double *B, int ldb,
                                            double *A, int lda)
{
    #pragma omp task depend(in:B[0:ldb*n]) \
                     depend(out:A[0:lda*n])
    plasma_core_dlacpy_tile2lapack_band(uplo,
                                 it, jt,
                                 m, n, nb, kl, ku,
                                 B, ldb,
                                 A, lda);
}
argon2_base.h
#pragma once #include <cstdint> #include <stdexcept> #include <new> #include <cstdlib> #include <memory> #include "argonishche.h" #include "internal/blake2b/blake2b.h" namespace argonishche { const uint32_t ARGON2_PREHASH_DIGEST_LENGTH = 64; const uint32_t ARGON2_SECRET_MAX_LENGTH = 32; const uint32_t ARGON2_PREHASH_SEED_LENGTH = 72; const uint32_t ARGON2_BLOCK_SIZE = 1024; const uint32_t ARGON2_QWORDS_IN_BLOCK = ARGON2_BLOCK_SIZE / 8; const uint32_t ARGON2_OWORDS_IN_BLOCK = ARGON2_BLOCK_SIZE / 16; const uint32_t ARGON2_HWORDS_IN_BLOCK = ARGON2_BLOCK_SIZE / 32; const uint32_t ARGON2_ADDRESSES_IN_BLOCK = 128; const uint32_t ARGON2_SYNC_POINTS = 4; const uint32_t ARGON2_SALT_MIN_LEN = 8; const uint32_t ARGON2_MIN_OUTLEN = 4; struct block { uint64_t v[ARGON2_QWORDS_IN_BLOCK]; }; template <InstructionSet instructionSet, uint32_t mcost, uint32_t threads> class Argon2 : public Argon2Base { public: Argon2(Argon2Type atype, uint32_t tcost, const uint8_t *key, uint32_t keylen) : secretlen__(keylen), tcost__(tcost), atype__(atype) { if(secretlen__) memcpy(secret__, key, keylen); } virtual ~Argon2() override { if (secretlen__) { secure_zero_memory__(secret__, secretlen__); secretlen__ = 0; } } virtual void Hash(const uint8_t *pwd, uint32_t pwdlen, const uint8_t *salt, uint32_t saltlen, uint8_t *out, uint32_t outlen, const uint8_t *aad = nullptr, uint32_t aadlen = 0) const override { std::unique_ptr<block[]> buffer(new block[memory_blocks__]); internal_hash__(buffer.get(), pwd, pwdlen, salt, saltlen, out, outlen, aad, aadlen); } virtual bool Verify(const uint8_t *pwd, uint32_t pwdlen, const uint8_t *salt, uint32_t saltlen, const uint8_t *hash, uint32_t hashlen, const uint8_t *aad = nullptr, uint32_t aadlen = 0) const override { std::unique_ptr<uint8_t[]> hash_result(new uint8_t[hashlen]); Hash(pwd, pwdlen, salt, saltlen, hash_result.get(), hashlen, aad, aadlen); return secure_compare__(hash, hash_result.get(), hashlen); } virtual void HashWithCustomMemory(uint8_t* memory, 
size_t mlen, const uint8_t *pwd, uint32_t pwdlen, const uint8_t* salt, uint32_t saltlen, uint8_t* out, uint32_t outlen, const uint8_t* aad = nullptr, uint32_t aadlen = 0) const override { if(memory == nullptr || mlen < sizeof(block) * memory_blocks__) throw std::runtime_error("memory is null or its size is not enough"); internal_hash__((block*)memory, pwd, pwdlen, salt, saltlen, out, outlen, aad, aadlen); } virtual bool VerifyWithCustomMemory(uint8_t* memory, size_t mlen, const uint8_t *pwd, uint32_t pwdlen, const uint8_t *salt, uint32_t saltlen, const uint8_t *hash, uint32_t hashlen, const uint8_t *aad = nullptr, uint32_t aadlen = 0) const override { std::unique_ptr<uint8_t[]> hash_result(new uint8_t[hashlen]); HashWithCustomMemory(memory, mlen, pwd, pwdlen, salt, saltlen, hash_result.get(), hashlen, aad, aadlen); return secure_compare__(hash_result.get(), hash, hashlen); } virtual size_t GetMemorySize() const override { return memory_blocks__ * sizeof(block); } protected: /* Constants */ uint8_t secret__[ARGON2_SECRET_MAX_LENGTH] = {0}; uint32_t secretlen__ = 0; uint32_t tcost__; Argon2Type atype__; static constexpr uint32_t lanes__ = threads; static constexpr uint32_t memory_blocks__ = (mcost >= 2 * ARGON2_SYNC_POINTS * lanes__) ? 
(mcost - mcost % (lanes__ * ARGON2_SYNC_POINTS)) : 2 * ARGON2_SYNC_POINTS * lanes__; static constexpr uint32_t segment_length__ = memory_blocks__ / (lanes__ * ARGON2_SYNC_POINTS); static constexpr uint32_t lane_length__ = segment_length__ * ARGON2_SYNC_POINTS; protected: /* Prototypes */ virtual void fill_block__(const block *prev_block, const block *ref_block, block *next_block, bool with_xor) const = 0; virtual void copy_block__(block *dst, const block *src) const = 0; virtual void xor_block__(block *dst, const block *src) const = 0; protected: /* Static functions */ static bool secure_compare__(const uint8_t* buffer1, const uint8_t* buffer2, uint32_t len) { bool result = true; for(uint32_t i = 0; i < len; ++i) { result &= (buffer1[i] == buffer2[i]); } return result; } static void secure_zero_memory__(void *src, size_t len) { static void *(*const volatile memset_v)(void *, int, size_t) = &memset; memset_v(src, 0, len); } static void store32__(uint32_t value, void *mem) { *((uint32_t *) mem) = value; } static void blake2b_hash64__(uint8_t out[BLAKE2B_OUTBYTES], const uint8_t in[BLAKE2B_OUTBYTES]) { Blake2B<instructionSet> hash(BLAKE2B_OUTBYTES); hash.Update(in, BLAKE2B_OUTBYTES); hash.Final(out, BLAKE2B_OUTBYTES); } static void argon2_expand_blockhash__(uint8_t expanded[ARGON2_BLOCK_SIZE], const uint8_t blockhash[ARGON2_PREHASH_SEED_LENGTH]) { uint8_t out_buffer[BLAKE2B_OUTBYTES]; uint8_t in_buffer[BLAKE2B_OUTBYTES]; const uint32_t HALF_OUT_BYTES = BLAKE2B_OUTBYTES / 2; const uint32_t HASH_BLOCKS_COUNT = ((ARGON2_BLOCK_SIZE / HALF_OUT_BYTES)); Blake2B<instructionSet> hash(BLAKE2B_OUTBYTES); hash.Update(ARGON2_BLOCK_SIZE); hash.Update(blockhash, ARGON2_PREHASH_SEED_LENGTH); hash.Final(out_buffer, BLAKE2B_OUTBYTES); memcpy(expanded, out_buffer, HALF_OUT_BYTES); for (uint32_t i = 1; i < HASH_BLOCKS_COUNT - 2; ++i) { memcpy(in_buffer, out_buffer, BLAKE2B_OUTBYTES); blake2b_hash64__(out_buffer, in_buffer); memcpy(expanded + (i * HALF_OUT_BYTES), out_buffer, 
/* NOTE(review): tail of a block-hash expansion helper whose beginning lies
 * outside this chunk — left byte-identical; it hashes the last buffer once
 * more and copies the final full Blake2b output into 'expanded'. */
HALF_OUT_BYTES); } blake2b_hash64__(in_buffer, out_buffer); memcpy(expanded + HALF_OUT_BYTES * (HASH_BLOCKS_COUNT - 2), in_buffer, BLAKE2B_OUTBYTES); }

/* Variable-length Blake2b — the H'(outlen, X) construction used by Argon2.
 * Short outputs (< 64 bytes) are a single hash; longer outputs are produced
 * by chaining 64-byte hashes, emitting BLAKE2B_OUTBYTES/2 bytes per link.
 * The output length itself is always mixed in first. */
static void blake2b_long__(uint8_t* out, uint32_t outlen, const uint8_t* in, uint32_t inlen)
{
    if(outlen < BLAKE2B_OUTBYTES) {
        /* single-shot: digest sized exactly to the requested output */
        Blake2B<instructionSet> hash(outlen);
        hash.Update(outlen);   /* length prefix is part of the hashed data */
        hash.Update(in, inlen);
        hash.Final(out, outlen);
    } else {
        uint8_t out_buffer[BLAKE2B_OUTBYTES];
        uint8_t in_buffer[BLAKE2B_OUTBYTES];
        /* bytes still to emit after the first half-output below */
        uint32_t toproduce = outlen - BLAKE2B_OUTBYTES / 2;
        Blake2B<instructionSet> hash(BLAKE2B_OUTBYTES);
        hash.Update(outlen);
        hash.Update(in, inlen);
        hash.Final(out_buffer, BLAKE2B_OUTBYTES);
        /* only the first half of each intermediate digest is emitted */
        memcpy(out, out_buffer, BLAKE2B_OUTBYTES / 2);
        out += BLAKE2B_OUTBYTES / 2;
        while(toproduce > BLAKE2B_OUTBYTES) {
            memcpy(in_buffer, out_buffer, BLAKE2B_OUTBYTES);
            /* deliberately shadows the outer 'hash': fresh state per link */
            Blake2B<instructionSet> hash(BLAKE2B_OUTBYTES);
            hash.Update(in_buffer, BLAKE2B_OUTBYTES);
            hash.Final(out_buffer, BLAKE2B_OUTBYTES);
            memcpy(out, out_buffer, BLAKE2B_OUTBYTES / 2);
            out += BLAKE2B_OUTBYTES / 2;
            toproduce -= BLAKE2B_OUTBYTES / 2;
        }
        memcpy(in_buffer, out_buffer, BLAKE2B_OUTBYTES);
        {
            /* final link emits the remaining 'toproduce' bytes in full */
            Blake2B<instructionSet> hash(BLAKE2B_OUTBYTES);
            hash.Update(in_buffer, toproduce);
            hash.Final(out_buffer, BLAKE2B_OUTBYTES);
            memcpy(out, out_buffer, toproduce);
        }
    }
}

/* Fill every byte of a 1 KiB Argon2 block with the same value (0 or 0xFF). */
static void init_block_value__(block *b, uint8_t in)
{
    memset(b->v, in, sizeof(b->v));
}

protected:
/* Functions */

/* Top-level Argon2 pipeline over caller-provided working memory:
 * seed the first blocks, run the memory-filling passes, squeeze the tag. */
void internal_hash__(block* memory, const uint8_t *pwd, uint32_t pwdlen,
    const uint8_t *salt, uint32_t saltlen, uint8_t *out, uint32_t outlen,
    const uint8_t *aad, uint32_t aadlen) const
{
    /*
     * all parameters checks are in proxy objects
     */
    initialize__(memory, outlen, pwd, pwdlen, salt, saltlen, aad, aadlen);
    fill_memory_blocks__(memory);
    finalize__(memory, out, outlen);
}

/* H0 pre-hash: absorbs all parameters and inputs in the fixed Argon2 order.
 * NOTE(review): 0x00000013 is version 19 (v1.3); the hashed parameter order
 * must not change or the result is incompatible with other implementations. */
void initial_hash__(uint8_t blockhash[ARGON2_PREHASH_DIGEST_LENGTH], uint32_t outlen,
    const uint8_t *pwd, uint32_t pwdlen, const uint8_t *salt, uint32_t saltlen,
    const uint8_t *aad, uint32_t aadlen) const
{
    Blake2B<instructionSet> hash(ARGON2_PREHASH_DIGEST_LENGTH);
    /* lanes, but lanes == threads */
    hash.Update(lanes__);
    /* outlen */
    hash.Update(outlen);
    /* m_cost */
    hash.Update(mcost);
    /* t_cost */
    hash.Update(tcost__);
    /* version */
    hash.Update(0x00000013);
    hash.Update((uint32_t)atype__);
    /* pwdlen */
    hash.Update(pwdlen);
    /* pwd */
    hash.Update(pwd, pwdlen);
    /* saltlen */
    hash.Update(saltlen);
    /* salt */
    if(saltlen)
        hash.Update(salt, saltlen);
    /* secret */
    hash.Update(secretlen__);
    if (secretlen__)
        hash.Update((void *) secret__, secretlen__);
    /* aadlen */
    hash.Update(aadlen);
    if (aadlen)
        hash.Update((void *) aad, aadlen);
    hash.Final(blockhash, ARGON2_PREHASH_DIGEST_LENGTH);
}

/* Derive blocks 0 and 1 of every lane from the pre-hash seed: the block
 * index (0 or 1) and the lane index are appended to the seed before each
 * expansion. */
void fill_first_blocks__(block* blocks, uint8_t *blockhash) const
{
    for (uint32_t l = 0; l < lanes__; l++) {
        /* fill the first block of the lane */
        store32__(l, blockhash + ARGON2_PREHASH_DIGEST_LENGTH + 4);
        store32__(0, blockhash + ARGON2_PREHASH_DIGEST_LENGTH);
        argon2_expand_blockhash__((uint8_t*)&(blocks[l * lane_length__]), blockhash);
        /* fill the second block of the lane */
        store32__(1, blockhash + ARGON2_PREHASH_DIGEST_LENGTH);
        argon2_expand_blockhash__((uint8_t*)&(blocks[l * lane_length__ + 1]), blockhash);
    }
}

/* Main work loop: tcost__ passes, each split into ARGON2_SYNC_POINTS slices;
 * lanes of one slice are independent and may run in parallel (OpenMP), with
 * the slice boundary acting as the synchronization point. */
void fill_memory_blocks__(block* memory) const
{
    for (uint32_t t = 0; t < tcost__; ++t) {
        for (uint32_t s = 0; s < ARGON2_SYNC_POINTS; ++s) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
            for (uint32_t l = 0; l < lanes__; ++l) {
                fill_segment__(memory, t, l, s);
            }
        }
    }
}

/* Compute H0 then seed the first two blocks of each lane from it. */
void initialize__(block *memory, uint32_t outlen, const uint8_t *pwd,
    uint32_t pwdlen, const uint8_t *salt, uint32_t saltlen,
    const uint8_t *aad, uint32_t aadlen) const
{
    uint8_t blockhash[ARGON2_PREHASH_SEED_LENGTH];
    initial_hash__(blockhash, outlen, pwd, pwdlen, salt, saltlen, aad, aadlen);
    fill_first_blocks__(memory, blockhash);
}

/* Size of the window a reference block may be drawn from.
 * Pass 0: everything up to the current slice; later passes: the whole lane
 * minus the slice being overwritten. Same-lane references may additionally
 * reach up to (but not including) the block currently being computed. */
uint32_t compute_reference_area__(uint32_t pass, uint32_t slice, uint32_t index, bool same_lane) const
{
    uint32_t pass_val = pass == 0 ? (slice * segment_length__) : (lane_length__ - segment_length__);
    return same_lane ? pass_val + (index - 1) : pass_val + (index == 0 ? -1 : 0);
}

/* Map a 32-bit pseudo-random value onto a concrete block index inside the
 * reference window; the squaring skews the distribution toward recent
 * blocks, as required by the Argon2 indexing rule. */
uint32_t index_alpha__(uint32_t pass, uint32_t slice, uint32_t index, uint32_t pseudo_rand, bool same_lane) const
{
    uint32_t reference_area_size = compute_reference_area__(pass, slice, index, same_lane);
    uint64_t relative_position = pseudo_rand;
    relative_position = relative_position * relative_position >> 32;
    relative_position = reference_area_size - 1 - (reference_area_size * relative_position >> 32);
    uint32_t start_position = 0;
    if (pass != 0)
        start_position = (slice == ARGON2_SYNC_POINTS - 1) ? 0 : (slice + 1) * segment_length__;
    /* window wraps around the lane on later passes */
    return (uint32_t)((start_position + relative_position) % lane_length__);
}

/* Generate the next block of data-independent reference addresses (Argon2i
 * mode): bump the address counter then compress the input block twice. */
void next_addresses(block *address_block, block *input_block, const block *zero_block) const
{
    input_block->v[6]++;
    fill_block__(zero_block, input_block, address_block, false);
    fill_block__(zero_block, address_block, address_block, false);
}

/* XOR the final block of every lane together and squeeze the tag through
 * the variable-length hash. */
void finalize__(const block* memory, uint8_t* out, uint32_t outlen) const
{
    block blockhash;
    copy_block__(&blockhash, memory + lane_length__ - 1);
    /* XOR the last blocks */
    for (uint32_t l = 1; l < lanes__; ++l) {
        uint32_t last_block_in_lane = l * lane_length__ + (lane_length__ - 1);
        xor_block__(&blockhash, memory + last_block_in_lane);
    }
    blake2b_long__(out, outlen, (uint8_t*)blockhash.v, ARGON2_BLOCK_SIZE);
}

/* The switch will be optimized out by the compiler as the type is known at
 * the compile time. Argon2id is hybrid: data-independent addressing for the
 * first half of pass 0, data-dependent afterwards. */
void fill_segment__(block *memory, uint32_t pass, uint32_t lane, uint32_t slice) const
{
    switch (atype__) {
    case Argon2Type::Argon2_d:
        fill_segment_d__(memory, pass, lane, slice);
        return;
    case Argon2Type::Argon2_i:
        fill_segment_i__(memory, pass, lane, slice, Argon2Type::Argon2_i);
        return;
    case Argon2Type::Argon2_id:
        if(pass == 0 && slice < ARGON2_SYNC_POINTS / 2)
            fill_segment_i__(memory, pass, lane, slice, Argon2Type::Argon2_id);
        else
            fill_segment_d__(memory, pass, lane, slice);
        return;
    }
}

/* Data-dependent segment fill (Argon2d): the pseudo-random reference index
 * is taken from word 0 of the previous block. Blocks 0 and 1 of the very
 * first slice were seeded by fill_first_blocks__, hence starting_index 2. */
void fill_segment_d__(block *memory, uint32_t pass, uint32_t lane, uint32_t slice) const
{
    uint32_t starting_index = (pass == 0 && slice == 0) ? 2 : 0;
    uint32_t curr_offset = lane * lane_length__ + slice * segment_length__ + starting_index;
    /* previous block, wrapping to the lane's last block at a lane start */
    uint32_t prev_offset = curr_offset + ((curr_offset % lane_length__ == 0) ? lane_length__ : 0) - 1;
    for (uint32_t i = starting_index; i < segment_length__; ++i, ++curr_offset, ++prev_offset) {
        if (curr_offset % lane_length__ == 1) {
            prev_offset = curr_offset - 1;
        }
        uint64_t pseudo_rand = memory[prev_offset].v[0];
        /* first slice of first pass may only reference its own lane */
        uint64_t ref_lane = (pass == 0 && slice == 0) ? lane : (((pseudo_rand >> 32)) % lanes__);
        uint64_t ref_index = index_alpha__(pass, slice, i, (uint32_t)(pseudo_rand & 0xFFFFFFFF), ref_lane == lane);
        block* ref_block = memory + lane_length__ * ref_lane + ref_index;
        /* later passes XOR over the existing block instead of overwriting */
        fill_block__(memory + prev_offset, ref_block, memory + curr_offset, pass != 0);
    }
}

/* Data-independent segment fill (Argon2i / first half of Argon2id): the
 * reference indices come from a counter-mode address block instead of from
 * the memory contents, defeating cache-timing side channels. */
void fill_segment_i__(block *memory, uint32_t pass, uint32_t lane, uint32_t slice, Argon2Type atp) const
{
    block address_block, input_block, zero_block;
    init_block_value__(&zero_block, 0);
    init_block_value__(&input_block, 0);
    /* address-generation input: position parameters in the spec's order */
    input_block.v[0] = pass;
    input_block.v[1] = lane;
    input_block.v[2] = slice;
    input_block.v[3] = memory_blocks__;
    input_block.v[4] = tcost__;
    input_block.v[5] = (uint64_t)atp;
    uint32_t starting_index = 0;
    if (pass == 0 && slice == 0) {
        starting_index = 2;
        next_addresses(&address_block, &input_block, &zero_block);
    }
    uint32_t curr_offset = lane * lane_length__ + slice * segment_length__ + starting_index;
    uint32_t prev_offset = curr_offset + ((curr_offset % lane_length__ == 0) ? lane_length__ : 0) - 1;
    for (uint32_t i = starting_index; i < segment_length__; ++i, ++curr_offset, ++prev_offset) {
        if (curr_offset % lane_length__ == 1) {
            prev_offset = curr_offset - 1;
        }
        /* refresh the address block every ARGON2_ADDRESSES_IN_BLOCK steps */
        if (i % ARGON2_ADDRESSES_IN_BLOCK == 0) {
            next_addresses(&address_block, &input_block, &zero_block);
        }
        uint64_t pseudo_rand = address_block.v[i % ARGON2_ADDRESSES_IN_BLOCK];
        uint64_t ref_lane = (pass == 0 && slice == 0)? lane : (((pseudo_rand >> 32)) % lanes__);
        uint64_t ref_index = index_alpha__(pass, slice, i, (uint32_t)(pseudo_rand & 0xFFFFFFFF), ref_lane == lane);
        block* ref_block = memory + lane_length__ * ref_lane + ref_index;
        fill_block__(memory + prev_offset, ref_block, memory + curr_offset, pass != 0);
    }
}
};
}
builder.h
// Copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details

#ifndef BUILDER_H_
#define BUILDER_H_

#include <algorithm>
#include <cinttypes>
#include <fstream>
#include <functional>
#include <type_traits>
#include <utility>

#include "command_line.h"
#include "generator.h"
#include "graph.h"
#include "platform_atomics.h"
#include "pvector.h"
#include "reader.h"
#include "timer.h"
#include "util.h"


/*
GAP Benchmark Suite
Class:  BuilderBase
Author: Scott Beamer

Given arguments from the command line (cli), returns a built graph
 - MakeGraph() will parse cli and obtain edgelist and call
   MakeGraphFromEL(edgelist) to perform actual graph construction
 - edgelist can be from file (reader) or synthetically generated (generator)
 - Common case: BuilderBase typedef'd (w/ params) to be Builder (benchmark.h)
*/


template <typename NodeID_, typename DestID_ = NodeID_,
          typename WeightT_ = NodeID_, bool invert = true>
class BuilderBase {
  typedef EdgePair<NodeID_, DestID_> Edge;
  typedef pvector<Edge> EdgeList;

  const CLBase &cli_;          // parsed command-line options
  bool symmetrize_;            // build an undirected (symmetrized) graph
  bool needs_weights_;         // true when DestID_ carries a weight payload
  int64_t num_nodes_ = -1;     // -1 until discovered from the edge list

 public:
  explicit BuilderBase(const CLBase &cli) : cli_(cli) {
    symmetrize_ = cli_.symmetrize();
    // weights are needed exactly when destination type differs from NodeID_
    needs_weights_ = !std::is_same<NodeID_, DestID_>::value;
  }

  // Source endpoint of an unweighted edge, used when building the transpose.
  DestID_ GetSource(EdgePair<NodeID_, NodeID_> e) {
    return e.u;
  }

  // Source endpoint of a weighted edge: re-attach the weight so the inverse
  // edge (v -> u) carries the same weight as (u -> v).
  DestID_ GetSource(EdgePair<NodeID_, NodeWeight<NodeID_, WeightT_>> e) {
    return NodeWeight<NodeID_, WeightT_>(e.u, e.v.w);
  }

  // Largest node ID appearing at either endpoint (parallel max-reduction);
  // used to size the graph when the node count is not known up front.
  NodeID_ FindMaxNodeID(const EdgeList &el) {
    NodeID_ max_seen = 0;
    #pragma omp parallel for reduction(max : max_seen)
    for (auto it = el.begin(); it < el.end(); it++) {
      Edge e = *it;
      max_seen = std::max(max_seen, e.u);
      max_seen = std::max(max_seen, (NodeID_) e.v);
    }
    return max_seen;
  }

  // Per-node degree counts; transpose selects in-degrees, symmetrize counts
  // both directions. fetch_and_add makes the shared counters safe under OpenMP.
  pvector<NodeID_> CountDegrees(const EdgeList &el, bool transpose) {
    pvector<NodeID_> degrees(num_nodes_, 0);
    #pragma omp parallel for
    for (auto it = el.begin(); it < el.end(); it++) {
      Edge e = *it;
      if (symmetrize_ || (!symmetrize_ && !transpose))
        fetch_and_add(degrees[e.u], 1);
      if (symmetrize_ || (!symmetrize_ && transpose))
        fetch_and_add(degrees[(NodeID_) e.v], 1);
    }
    return degrees;
  }

  // Sequential exclusive prefix sum; result has degrees.size()+1 entries with
  // the grand total in the last slot (CSR offset array shape).
  static pvector<SGOffset> PrefixSum(const pvector<NodeID_> &degrees) {
    pvector<SGOffset> sums(degrees.size() + 1);
    SGOffset total = 0;
    for (size_t n=0; n < degrees.size(); n++) {
      sums[n] = total;
      total += degrees[n];
    }
    sums[degrees.size()] = total;
    return sums;
  }

  // Parallel exclusive prefix sum: per-block local sums, a sequential scan
  // over the (few) block totals, then a parallel sweep writing final offsets.
  static pvector<SGOffset> ParallelPrefixSum(const pvector<NodeID_> &degrees) {
    const size_t block_size = 1<<20;
    const size_t num_blocks = (degrees.size() + block_size - 1) / block_size;
    pvector<SGOffset> local_sums(num_blocks);
    #pragma omp parallel for
    for (size_t block=0; block < num_blocks; block++) {
      SGOffset lsum = 0;
      size_t block_end = std::min((block + 1) * block_size, degrees.size());
      for (size_t i=block * block_size; i < block_end; i++)
        lsum += degrees[i];
      local_sums[block] = lsum;
    }
    pvector<SGOffset> bulk_prefix(num_blocks+1);
    SGOffset total = 0;
    for (size_t block=0; block < num_blocks; block++) {
      bulk_prefix[block] = total;
      total += local_sums[block];
    }
    bulk_prefix[num_blocks] = total;
    pvector<SGOffset> prefix(degrees.size() + 1);
    #pragma omp parallel for
    for (size_t block=0; block < num_blocks; block++) {
      SGOffset local_total = bulk_prefix[block];
      size_t block_end = std::min((block + 1) * block_size, degrees.size());
      for (size_t i=block * block_size; i < block_end; i++) {
        prefix[i] = local_total;
        local_total += degrees[i];
      }
    }
    prefix[degrees.size()] = bulk_prefix[num_blocks];
    return prefix;
  }

  // Removes self-loops and redundant edges
  // Side effect: neighbor IDs will be sorted
  void SquishCSR(const CSRGraph<NodeID_, DestID_, invert> &g, bool transpose,
                 DestID_*** sq_index, DestID_** sq_neighs) {
    pvector<NodeID_> diffs(g.num_nodes());
    DestID_ *n_start, *n_end;
    #pragma omp parallel for private(n_start, n_end)
    for (NodeID_ n=0; n < g.num_nodes(); n++) {
      if (transpose) {
        n_start = g.in_neigh(n).begin();
        n_end = g.in_neigh(n).end();
      } else {
        n_start = g.out_neigh(n).begin();
        n_end = g.out_neigh(n).end();
      }
      // sort + unique drops duplicate edges; remove(n) drops self-loops
      std::sort(n_start, n_end);
      DestID_ *new_end = std::unique(n_start, n_end);
      new_end = std::remove(n_start, new_end, n);
      diffs[n] = new_end - n_start;  // surviving degree of node n
    }
    pvector<SGOffset> sq_offsets = ParallelPrefixSum(diffs);
    *sq_neighs = new DestID_[sq_offsets[g.num_nodes()]];
    *sq_index = CSRGraph<NodeID_, DestID_>::GenIndex(sq_offsets, *sq_neighs);
    #pragma omp parallel for private(n_start)
    for (NodeID_ n=0; n < g.num_nodes(); n++) {
      if (transpose)
        n_start = g.in_neigh(n).begin();
      else
        n_start = g.out_neigh(n).begin();
      // neighbor lists were squished in place above; copy the kept prefix
      std::copy(n_start, n_start+diffs[n], (*sq_index)[n]);
    }
  }

  // Rebuild g with self-loops/duplicates removed in both directions.
  // NOTE(review): when g is directed and invert is false, in_index/in_neighs
  // are passed uninitialized — presumably that constructor ignores them when
  // inversion is disabled; verify against graph.h.
  CSRGraph<NodeID_, DestID_, invert> SquishGraph(
      const CSRGraph<NodeID_, DestID_, invert> &g) {
    DestID_ **out_index, *out_neighs, **in_index, *in_neighs;
    SquishCSR(g, false, &out_index, &out_neighs);
    if (g.directed()) {
      if (invert)
        SquishCSR(g, true, &in_index, &in_neighs);
      return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), out_index,
                                                out_neighs, in_index,
                                                in_neighs);
    } else {
      return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), out_index,
                                                out_neighs);
    }
  }

  /*
  Graph Building Steps (for CSR):
    - Read edgelist once to determine vertex degrees (CountDegrees)
    - Determine vertex offsets by a prefix sum (ParallelPrefixSum)
    - Allocate storage and set points according to offsets (GenIndex)
    - Copy edges into storage
  */
  void MakeCSR(const EdgeList &el, bool transpose, DestID_*** index,
               DestID_** neighs) {
    pvector<NodeID_> degrees = CountDegrees(el, transpose);
    pvector<SGOffset> offsets = ParallelPrefixSum(degrees);
    *neighs = new DestID_[offsets[num_nodes_]];
    *index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, *neighs);
    #pragma omp parallel for
    for (auto it = el.begin(); it < el.end(); it++) {
      Edge e = *it;
      // fetch_and_add reserves a unique slot per edge within a node's range
      if (symmetrize_ || (!symmetrize_ && !transpose))
        (*neighs)[fetch_and_add(offsets[e.u], 1)] = e.v;
      if (symmetrize_ || (!symmetrize_ && transpose))
        (*neighs)[fetch_and_add(offsets[static_cast<NodeID_>(e.v)], 1)] =
            GetSource(e);
    }
  }
// Build a CSR graph from an edge list: discover the node count if needed,
// synthesize weights when required, build out-CSR, and (for directed graphs
// with invert enabled) also build the in-CSR (transpose).
CSRGraph<NodeID_, DestID_, invert> MakeGraphFromEL(EdgeList &el) {
  DestID_ **index = nullptr, **inv_index = nullptr;
  DestID_ *neighs = nullptr, *inv_neighs = nullptr;
  Timer t;
  t.Start();
  if (num_nodes_ == -1)
    num_nodes_ = FindMaxNodeID(el)+1;  // IDs are 0-based, hence +1
  if (needs_weights_)
    Generator<NodeID_, DestID_, WeightT_>::InsertWeights(el);
  MakeCSR(el, false, &index, &neighs);
  if (!symmetrize_ && invert)
    MakeCSR(el, true, &inv_index, &inv_neighs);
  t.Stop();
  PrintTime("Build Time", t.Seconds());
  if (symmetrize_)
    // symmetrized graphs are undirected: no separate inverse CSR needed
    return CSRGraph<NodeID_, DestID_, invert>(num_nodes_, index, neighs);
  else
    return CSRGraph<NodeID_, DestID_, invert>(num_nodes_, index, neighs,
                                              inv_index, inv_neighs);
}

// Entry point: obtain an edge list from a file (reader) or a synthetic
// generator per the CLI, build the graph, then squish (dedupe/de-loop) it.
// Serialized .sg/.wsg inputs bypass construction entirely.
CSRGraph<NodeID_, DestID_, invert> MakeGraph() {
  CSRGraph<NodeID_, DestID_, invert> g;
  {  // extra scope to trigger earlier deletion of el (save memory)
    EdgeList el;
    if (cli_.filename() != "") {
      Reader<NodeID_, DestID_, WeightT_, invert> r(cli_.filename());
      if ((r.GetSuffix() == ".sg") || (r.GetSuffix() == ".wsg")) {
        return r.ReadSerializedGraph();
      } else {
        el = r.ReadFile(needs_weights_);
      }
    } else if (cli_.scale() != -1) {
      Generator<NodeID_, DestID_> gen(cli_.scale(), cli_.degree());
      el = gen.GenerateEL(cli_.uniform());
    }
    g = MakeGraphFromEL(el);
  }
  return SquishGraph(g);
}

// Relabels (and rebuilds) graph by order of decreasing degree
static CSRGraph<NodeID_, DestID_, invert> RelabelByDegree(
    const CSRGraph<NodeID_, DestID_, invert> &g) {
  if (g.directed()) {
    // only supported for undirected graphs (single CSR to rebuild)
    std::cout << "Cannot relabel directed graph" << std::endl;
    std::exit(-11);
  }
  Timer t;
  t.Start();
  typedef std::pair<int64_t, NodeID_> degree_node_p;
  pvector<degree_node_p> degree_id_pairs(g.num_nodes());
  #pragma omp parallel for
  for (NodeID_ n=0; n < g.num_nodes(); n++)
    degree_id_pairs[n] = std::make_pair(g.out_degree(n), n);
  // sort (degree, id) pairs descending so rank 0 is the highest-degree node
  std::sort(degree_id_pairs.begin(), degree_id_pairs.end(),
            std::greater<degree_node_p>());
  pvector<NodeID_> degrees(g.num_nodes());
  pvector<NodeID_> new_ids(g.num_nodes());
  #pragma omp parallel for
  for (NodeID_ n=0; n < g.num_nodes(); n++) {
    degrees[n] = degree_id_pairs[n].first;
    new_ids[degree_id_pairs[n].second] = n;  // old id -> new rank
  }
  pvector<SGOffset> offsets = ParallelPrefixSum(degrees);
  DestID_* neighs = new DestID_[offsets[g.num_nodes()]];
  DestID_** index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, neighs);
  #pragma omp parallel for
  for (NodeID_ u=0; u < g.num_nodes(); u++) {
    for (NodeID_ v : g.out_neigh(u))
      neighs[offsets[new_ids[u]]++] = new_ids[v];
    // index[r+1] is the end of row r, so this sorts u's relabeled neighbors
    std::sort(index[new_ids[u]], index[new_ids[u]+1]);
  }
  t.Stop();
  PrintTime("Relabel", t.Seconds());
  return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), index, neighs);
}
};

#endif  // BUILDER_H_
morphology.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y % % MM MM O O R R P P H H O O L O O G Y Y % % M M M O O RRRR PPPP HHHHH O O L O O G GGG Y % % M M O O R R P H H O O L O O G G Y % % M M OOO R R P H H OOO LLLLL OOO GGG Y % % % % % % MagickCore Morphology Methods % % % % Software Design % % Anthony Thyssen % % January 2010 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Morphology is the application of various kernels, of any size or shape, to an % image in various ways (typically binary, but not always). % % Convolution (weighted sum or average) is just one specific type of % morphology. Just one that is very common for image bluring and sharpening % effects. Not only 2D Gaussian blurring, but also 2-pass 1D Blurring. % % This module provides not only a general morphology function, and the ability % to apply more advanced or iterative morphologies, but also functions for the % generation of many different types of kernel arrays from user supplied % arguments. Prehaps even the generation of a kernel from a small image. */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/linked-list.h" #include "MagickCore/list.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor-private.h" #include "MagickCore/morphology.h" #include "MagickCore/morphology-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/prepress.h" #include "MagickCore/quantize.h" #include "MagickCore/resource_.h" #include "MagickCore/registry.h" #include "MagickCore/semaphore.h" #include "MagickCore/splay-tree.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" /* Other global definitions used by module. 
*/ #define Minimize(assign,value) assign=MagickMin(assign,value) #define Maximize(assign,value) assign=MagickMax(assign,value) /* Integer Factorial Function - for a Binomial kernel */ #if 1 static inline size_t fact(size_t n) { size_t f,l; for(f=1, l=2; l <= n; f=f*l, l++); return(f); } #elif 1 /* glibc floating point alternatives */ #define fact(n) ((size_t)tgamma((double)n+1)) #else #define fact(n) ((size_t)lgamma((double)n+1)) #endif /* Currently these are only internal to this module */ static void CalcKernelMetaData(KernelInfo *), ExpandMirrorKernelInfo(KernelInfo *), ExpandRotateKernelInfo(KernelInfo *, const double), RotateKernelInfo(KernelInfo *, double); /* Quick function to find last kernel in a kernel list */ static inline KernelInfo *LastKernelInfo(KernelInfo *kernel) { while (kernel->next != (KernelInfo *) NULL) kernel=kernel->next; return(kernel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireKernelInfo() takes the given string (generally supplied by the % user) and converts it into a Morphology/Convolution Kernel. This allows % users to specify a kernel from a number of pre-defined kernels, or to fully % specify their own kernel for a specific Convolution or Morphology % Operation. % % The kernel so generated can be any rectangular array of floating point % values (doubles) with the 'control point' or 'pixel being affected' % anywhere within that array of values. % % Previously IM was restricted to a square of odd size using the exact % center as origin, this is no longer the case, and any rectangular kernel % with any value being declared the origin. This in turn allows the use of % highly asymmetrical kernels. 
% % The floating point values in the kernel can also include a special value % known as 'nan' or 'not a number' to indicate that this value is not part % of the kernel array. This allows you to shaped the kernel within its % rectangular area. That is 'nan' values provide a 'mask' for the kernel % shape. However at least one non-nan value must be provided for correct % working of a kernel. % % The returned kernel should be freed using the DestroyKernelInfo() when you % are finished with it. Do not free this memory yourself. % % Input kernel defintion strings can consist of any of three types. % % "name:args[[@><]" % Select from one of the built in kernels, using the name and % geometry arguments supplied. See AcquireKernelBuiltIn() % % "WxH[+X+Y][@><]:num, num, num ..." % a kernel of size W by H, with W*H floating point numbers following. % the 'center' can be optionally be defined at +X+Y (such that +0+0 % is top left corner). If not defined the pixel in the center, for % odd sizes, or to the immediate top or left of center for even sizes % is automatically selected. % % "num, num, num, num, ..." % list of floating point numbers defining an 'old style' odd sized % square kernel. At least 9 values should be provided for a 3x3 % square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc. % Values can be space or comma separated. This is not recommended. % % You can define a 'list of kernels' which can be used by some morphology % operators A list is defined as a semi-colon separated list kernels. % % " kernel ; kernel ; kernel ; " % % Any extra ';' characters, at start, end or between kernel defintions are % simply ignored. % % The special flags will expand a single kernel, into a list of rotated % kernels. A '@' flag will expand a 3x3 kernel into a list of 45-degree % cyclic rotations, while a '>' will generate a list of 90-degree rotations. 
% The '<' also exands using 90-degree rotates, but giving a 180-degree % reflected kernel before the +/- 90-degree rotations, which can be important % for Thinning operations. % % Note that 'name' kernels will start with an alphabetic character while the % new kernel specification has a ':' character in its specification string. % If neither is the case, it is assumed an old style of a simple list of % numbers generating a odd-sized square kernel has been given. % % The format of the AcquireKernal method is: % % KernelInfo *AcquireKernelInfo(const char *kernel_string) % % A description of each parameter follows: % % o kernel_string: the Morphology/Convolution kernel wanted. % */ /* This was separated so that it could be used as a separate ** array input handling function, such as for -color-matrix */ static KernelInfo *ParseKernelArray(const char *kernel_string) { KernelInfo *kernel; char token[MagickPathExtent]; const char *p, *end; register ssize_t i; double nan = sqrt((double)-1.0); /* Special Value : Not A Number */ MagickStatusType flags; GeometryInfo args; kernel=(KernelInfo *) AcquireQuantumMemory(1,sizeof(*kernel)); if (kernel == (KernelInfo *) NULL) return(kernel); (void) memset(kernel,0,sizeof(*kernel)); kernel->minimum = kernel->maximum = kernel->angle = 0.0; kernel->negative_range = kernel->positive_range = 0.0; kernel->type = UserDefinedKernel; kernel->next = (KernelInfo *) NULL; kernel->signature=MagickCoreSignature; if (kernel_string == (const char *) NULL) return(kernel); /* find end of this specific kernel definition string */ end = strchr(kernel_string, ';'); if ( end == (char *) NULL ) end = strchr(kernel_string, '\0'); /* clear flags - for Expanding kernel lists thorugh rotations */ flags = NoValue; /* Has a ':' in argument - New user kernel specification FUTURE: this split on ':' could be done by StringToken() */ p = strchr(kernel_string, ':'); if ( p != (char *) NULL && p < end) { /* ParseGeometry() needs the geometry separated! 
-- Arrgghh */ memcpy(token, kernel_string, (size_t) (p-kernel_string)); token[p-kernel_string] = '\0'; SetGeometryInfo(&args); flags = ParseGeometry(token, &args); /* Size handling and checks of geometry settings */ if ( (flags & WidthValue) == 0 ) /* if no width then */ args.rho = args.sigma; /* then width = height */ if ( args.rho < 1.0 ) /* if width too small */ args.rho = 1.0; /* then width = 1 */ if ( args.sigma < 1.0 ) /* if height too small */ args.sigma = args.rho; /* then height = width */ kernel->width = (size_t)args.rho; kernel->height = (size_t)args.sigma; /* Offset Handling and Checks */ if ( args.xi < 0.0 || args.psi < 0.0 ) return(DestroyKernelInfo(kernel)); kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi : (ssize_t) (kernel->width-1)/2; kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi : (ssize_t) (kernel->height-1)/2; if ( kernel->x >= (ssize_t) kernel->width || kernel->y >= (ssize_t) kernel->height ) return(DestroyKernelInfo(kernel)); p++; /* advance beyond the ':' */ } else { /* ELSE - Old old specification, forming odd-square kernel */ /* count up number of values given */ p=(const char *) kernel_string; while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\'')) p++; /* ignore "'" chars for convolve filter usage - Cristy */ for (i=0; p < end; i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); } /* set the size of the kernel - old sized square */ kernel->width = kernel->height= (size_t) sqrt((double) i+1.0); kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; p=(const char *) kernel_string; while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\'')) p++; /* ignore "'" chars for convolve filter usage - Cristy */ } /* Read in the kernel values from rest of input string argument */ kernel->values=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory( kernel->width,kernel->height*sizeof(*kernel->values))); if (kernel->values == (MagickRealType 
*) NULL) return(DestroyKernelInfo(kernel)); kernel->minimum=MagickMaximumValue; kernel->maximum=(-MagickMaximumValue); kernel->negative_range = kernel->positive_range = 0.0; for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++) { (void) GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') (void) GetNextToken(p,&p,MagickPathExtent,token); if ( LocaleCompare("nan",token) == 0 || LocaleCompare("-",token) == 0 ) { kernel->values[i] = nan; /* this value is not part of neighbourhood */ } else { kernel->values[i] = StringToDouble(token,(char **) NULL); ( kernel->values[i] < 0) ? ( kernel->negative_range += kernel->values[i] ) : ( kernel->positive_range += kernel->values[i] ); Minimize(kernel->minimum, kernel->values[i]); Maximize(kernel->maximum, kernel->values[i]); } } /* sanity check -- no more values in kernel definition */ (void) GetNextToken(p,&p,MagickPathExtent,token); if ( *token != '\0' && *token != ';' && *token != '\'' ) return(DestroyKernelInfo(kernel)); #if 0 /* this was the old method of handling a incomplete kernel */ if ( i < (ssize_t) (kernel->width*kernel->height) ) { Minimize(kernel->minimum, kernel->values[i]); Maximize(kernel->maximum, kernel->values[i]); for ( ; i < (ssize_t) (kernel->width*kernel->height); i++) kernel->values[i]=0.0; } #else /* Number of values for kernel was not enough - Report Error */ if ( i < (ssize_t) (kernel->width*kernel->height) ) return(DestroyKernelInfo(kernel)); #endif /* check that we recieved at least one real (non-nan) value! 
*/ if (kernel->minimum == MagickMaximumValue) return(DestroyKernelInfo(kernel)); if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel size */ ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */ else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */ ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */ else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */ ExpandMirrorKernelInfo(kernel); /* 90 degree mirror rotate */ return(kernel); } static KernelInfo *ParseKernelName(const char *kernel_string, ExceptionInfo *exception) { char token[MagickPathExtent]; const char *p, *end; GeometryInfo args; KernelInfo *kernel; MagickStatusType flags; ssize_t type; /* Parse special 'named' kernel */ (void) GetNextToken(kernel_string,&p,MagickPathExtent,token); type=ParseCommandOption(MagickKernelOptions,MagickFalse,token); if ( type < 0 || type == UserDefinedKernel ) return((KernelInfo *) NULL); /* not a valid named kernel */ while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';')) p++; end = strchr(p, ';'); /* end of this kernel defintion */ if ( end == (char *) NULL ) end = strchr(p, '\0'); /* ParseGeometry() needs the geometry separated! 
-- Arrgghh */ memcpy(token, p, (size_t) (end-p)); token[end-p] = '\0'; SetGeometryInfo(&args); flags = ParseGeometry(token, &args); #if 0 /* For Debugging Geometry Input */ (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n", flags, args.rho, args.sigma, args.xi, args.psi ); #endif /* special handling of missing values in input string */ switch( type ) { /* Shape Kernel Defaults */ case UnityKernel: if ( (flags & WidthValue) == 0 ) args.rho = 1.0; /* Default scale = 1.0, zero is valid */ break; case SquareKernel: case DiamondKernel: case OctagonKernel: case DiskKernel: case PlusKernel: case CrossKernel: if ( (flags & HeightValue) == 0 ) args.sigma = 1.0; /* Default scale = 1.0, zero is valid */ break; case RingKernel: if ( (flags & XValue) == 0 ) args.xi = 1.0; /* Default scale = 1.0, zero is valid */ break; case RectangleKernel: /* Rectangle - set size defaults */ if ( (flags & WidthValue) == 0 ) /* if no width then */ args.rho = args.sigma; /* then width = height */ if ( args.rho < 1.0 ) /* if width too small */ args.rho = 3; /* then width = 3 */ if ( args.sigma < 1.0 ) /* if height too small */ args.sigma = args.rho; /* then height = width */ if ( (flags & XValue) == 0 ) /* center offset if not defined */ args.xi = (double)(((ssize_t)args.rho-1)/2); if ( (flags & YValue) == 0 ) args.psi = (double)(((ssize_t)args.sigma-1)/2); break; /* Distance Kernel Defaults */ case ChebyshevKernel: case ManhattanKernel: case OctagonalKernel: case EuclideanKernel: if ( (flags & HeightValue) == 0 ) /* no distance scale */ args.sigma = 100.0; /* default distance scaling */ else if ( (flags & AspectValue ) != 0 ) /* '!' 
flag */ args.sigma = QuantumRange/(args.sigma+1); /* maximum pixel distance */ else if ( (flags & PercentValue ) != 0 ) /* '%' flag */ args.sigma *= QuantumRange/100.0; /* percentage of color range */ break; default: break; } kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args, exception); if ( kernel == (KernelInfo *) NULL ) return(kernel); /* global expand to rotated kernel list - only for single kernels */ if ( kernel->next == (KernelInfo *) NULL ) { if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel args */ ExpandRotateKernelInfo(kernel, 45.0); else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */ ExpandRotateKernelInfo(kernel, 90.0); else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */ ExpandMirrorKernelInfo(kernel); } return(kernel); } MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string, ExceptionInfo *exception) { KernelInfo *kernel, *new_kernel; char *kernel_cache, token[MagickPathExtent]; const char *p; if (kernel_string == (const char *) NULL) return(ParseKernelArray(kernel_string)); p=kernel_string; kernel_cache=(char *) NULL; if (*kernel_string == '@') { kernel_cache=FileToString(kernel_string+1,~0UL,exception); if (kernel_cache == (char *) NULL) return((KernelInfo *) NULL); p=(const char *) kernel_cache; } kernel=NULL; while (GetNextToken(p,(const char **) NULL,MagickPathExtent,token), *token != '\0') { /* ignore extra or multiple ';' kernel separators */ if (*token != ';') { /* tokens starting with alpha is a Named kernel */ if (isalpha((int) ((unsigned char) *token)) != 0) new_kernel=ParseKernelName(p,exception); else /* otherwise a user defined kernel array */ new_kernel=ParseKernelArray(p); /* Error handling -- this is not proper error handling! 
*/ if (new_kernel == (KernelInfo *) NULL) { if (kernel != (KernelInfo *) NULL) kernel=DestroyKernelInfo(kernel); return((KernelInfo *) NULL); } /* initialise or append the kernel list */ if (kernel == (KernelInfo *) NULL) kernel=new_kernel; else LastKernelInfo(kernel)->next=new_kernel; } /* look for the next kernel in list */ p=strchr(p,';'); if (p == (char *) NULL) break; p++; } if (kernel_cache != (char *) NULL) kernel_cache=DestroyString(kernel_cache); return(kernel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e K e r n e l B u i l t I n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireKernelBuiltIn() returned one of the 'named' built-in types of % kernels used for special purposes such as gaussian blurring, skeleton % pruning, and edge distance determination. % % They take a KernelType, and a set of geometry style arguments, which were % typically decoded from a user supplied string, or from a more complex % Morphology Method that was requested. % % The format of the AcquireKernalBuiltIn method is: % % KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type, % const GeometryInfo args) % % A description of each parameter follows: % % o type: the pre-defined type of kernel wanted % % o args: arguments defining or modifying the kernel % % Convolution Kernels % % Unity % The a No-Op or Scaling single element kernel. % % Gaussian:{radius},{sigma} % Generate a two-dimensional gaussian kernel, as used by -gaussian. % The sigma for the curve is required. The resulting kernel is % normalized, % % If 'sigma' is zero, you get a single pixel on a field of zeros. % % NOTE: that the 'radius' is optional, but if provided can limit (clip) % the final size of the resulting kernel to a square 2*radius+1 in size. % The radius should be at least 2 times that of the sigma value, or % sever clipping and aliasing may result. 
If not given or set to 0 the
%       radius will be determined so as to produce the best minimal error
%       result, which is usually much larger than is normally needed.
%
%    LoG:{radius},{sigma}
%       "Laplacian of a Gaussian" or "Mexican Hat" Kernel.
%       The supposed ideal edge detection, zero-summing kernel.
%
%       An alternative to this kernel is to use a "DoG" with a sigma ratio of
%       approx 1.6 (according to wikipedia).
%
%    DoG:{radius},{sigma1},{sigma2}
%       "Difference of Gaussians" Kernel.
%       As "Gaussian" but with a gaussian produced by 'sigma2' subtracted
%       from the gaussian produced by 'sigma1'.  Typically sigma2 > sigma1.
%       The result is a zero-summing kernel.
%
%    Blur:{radius},{sigma}[,{angle}]
%       Generates a 1 dimensional or linear gaussian blur, at the angle given
%       (currently restricted to orthogonal angles).  If a 'radius' is given
%       the kernel is clipped to a width of 2*radius+1.  Kernel can be
%       rotated by a 90 degree angle.
%
%       If 'sigma' is zero, you get a single pixel on a field of zeros.
%
%       Note that two convolutions with two "Blur" kernels perpendicular to
%       each other, is equivalent to a far larger "Gaussian" kernel with the
%       same sigma value.  However it is much faster to apply.  This is how
%       the "-blur" operator actually works.
%
%    Comet:{width},{sigma},{angle}
%       Blur in one direction only, much like how a bright object leaves
%       a comet like trail.  The Kernel is actually half a gaussian curve,
%       Adding two such blurs in opposite directions produces a Blur Kernel.
%       Angle can be rotated in multiples of 90 degrees.
%
%       Note that the first argument is the width of the kernel and not the
%       radius of the kernel.
%
%    Binomial:[{radius}]
%       Generate a discrete kernel using a 2 dimensional Pascal's Triangle
%       of values.  Used for special forms of image filters.
%
%    # Still to be implemented...
%    #
%    # Filter2D
%    # Filter1D
%    #    Set kernel values using a resize filter, and given scale (sigma)
%    #    Cylindrical or Linear.   Is this possible with an image?
% # % % Named Constant Convolution Kernels % % All these are unscaled, zero-summing kernels by default. As such for % non-HDRI version of ImageMagick some form of normalization, user scaling, % and biasing the results is recommended, to prevent the resulting image % being 'clipped'. % % The 3x3 kernels (most of these) can be circularly rotated in multiples of % 45 degrees to generate the 8 angled varients of each of the kernels. % % Laplacian:{type} % Discrete Lapacian Kernels, (without normalization) % Type 0 : 3x3 with center:8 surounded by -1 (8 neighbourhood) % Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood) % Type 2 : 3x3 with center:4 edge:1 corner:-2 % Type 3 : 3x3 with center:4 edge:-2 corner:1 % Type 5 : 5x5 laplacian % Type 7 : 7x7 laplacian % Type 15 : 5x5 LoG (sigma approx 1.4) % Type 19 : 9x9 LoG (sigma approx 1.4) % % Sobel:{angle} % Sobel 'Edge' convolution kernel (3x3) % | -1, 0, 1 | % | -2, 0,-2 | % | -1, 0, 1 | % % Roberts:{angle} % Roberts convolution kernel (3x3) % | 0, 0, 0 | % | -1, 1, 0 | % | 0, 0, 0 | % % Prewitt:{angle} % Prewitt Edge convolution kernel (3x3) % | -1, 0, 1 | % | -1, 0, 1 | % | -1, 0, 1 | % % Compass:{angle} % Prewitt's "Compass" convolution kernel (3x3) % | -1, 1, 1 | % | -1,-2, 1 | % | -1, 1, 1 | % % Kirsch:{angle} % Kirsch's "Compass" convolution kernel (3x3) % | -3,-3, 5 | % | -3, 0, 5 | % | -3,-3, 5 | % % FreiChen:{angle} % Frei-Chen Edge Detector is based on a kernel that is similar to % the Sobel Kernel, but is designed to be isotropic. That is it takes % into account the distance of the diagonal in the kernel. % % | 1, 0, -1 | % | sqrt(2), 0, -sqrt(2) | % | 1, 0, -1 | % % FreiChen:{type},{angle} % % Frei-Chen Pre-weighted kernels... % % Type 0: default un-nomalized version shown above. % % Type 1: Orthogonal Kernel (same as type 11 below) % | 1, 0, -1 | % | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2) % | 1, 0, -1 | % % Type 2: Diagonal form of Kernel... 
% | 1, sqrt(2), 0 | % | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2) % | 0, -sqrt(2) -1 | % % However this kernel is als at the heart of the FreiChen Edge Detection % Process which uses a set of 9 specially weighted kernel. These 9 % kernels not be normalized, but directly applied to the image. The % results is then added together, to produce the intensity of an edge in % a specific direction. The square root of the pixel value can then be % taken as the cosine of the edge, and at least 2 such runs at 90 degrees % from each other, both the direction and the strength of the edge can be % determined. % % Type 10: All 9 of the following pre-weighted kernels... % % Type 11: | 1, 0, -1 | % | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2) % | 1, 0, -1 | % % Type 12: | 1, sqrt(2), 1 | % | 0, 0, 0 | / 2*sqrt(2) % | 1, sqrt(2), 1 | % % Type 13: | sqrt(2), -1, 0 | % | -1, 0, 1 | / 2*sqrt(2) % | 0, 1, -sqrt(2) | % % Type 14: | 0, 1, -sqrt(2) | % | -1, 0, 1 | / 2*sqrt(2) % | sqrt(2), -1, 0 | % % Type 15: | 0, -1, 0 | % | 1, 0, 1 | / 2 % | 0, -1, 0 | % % Type 16: | 1, 0, -1 | % | 0, 0, 0 | / 2 % | -1, 0, 1 | % % Type 17: | 1, -2, 1 | % | -2, 4, -2 | / 6 % | -1, -2, 1 | % % Type 18: | -2, 1, -2 | % | 1, 4, 1 | / 6 % | -2, 1, -2 | % % Type 19: | 1, 1, 1 | % | 1, 1, 1 | / 3 % | 1, 1, 1 | % % The first 4 are for edge detection, the next 4 are for line detection % and the last is to add a average component to the results. % % Using a special type of '-1' will return all 9 pre-weighted kernels % as a multi-kernel list, so that you can use them directly (without % normalization) with the special "-set option:morphology:compose Plus" % setting to apply the full FreiChen Edge Detection Technique. % % If 'type' is large it will be taken to be an actual rotation angle for % the default FreiChen (type 0) kernel. As such FreiChen:45 will look % like a Sobel:45 but with 'sqrt(2)' instead of '2' values. 
%
%  WARNING: The above was laid out as per
%  http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf
%  But rotated 90 degrees so direction is from left rather than the top.
%  I have yet to find any secondary confirmation of the above.  The only
%  other source found was actual source code at
%  http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf
%  Neither paper defines the kernels in a way that looks logical or
%  correct when taken as a whole.
%
%  Boolean Kernels
%
%    Diamond:[{radius}[,{scale}]]
%       Generate a diamond shaped kernel with given radius to the points.
%       Kernel size will again be radius*2+1 square and defaults to radius 1,
%       generating a 3x3 kernel that is slightly larger than a square.
%
%    Square:[{radius}[,{scale}]]
%       Generate a square shaped kernel of size radius*2+1, and defaulting
%       to a 3x3 (radius 1).
%
%    Octagon:[{radius}[,{scale}]]
%       Generate octagonal shaped kernel of given radius and constant scale.
%       Default radius is 3 producing a 7x7 kernel.  A radius of 1 will
%       result in "Diamond" kernel.
%
%    Disk:[{radius}[,{scale}]]
%       Generate a binary disk, thresholded at the radius given, the radius
%       may be a floating-point value.  Final Kernel size is
%       floor(radius)*2+1 square.  A radius of 5.3 is the default.
%
%       NOTE: That low radii Disk kernels produce the same results as
%       many of the previously defined kernels, but differ greatly at larger
%       radii.  Here is a table of equivalences...
%          "Disk:1"    => "Diamond", "Octagon:1", or "Cross:1"
%          "Disk:1.5"  => "Square"
%          "Disk:2"    => "Diamond:2"
%          "Disk:2.5"  => "Octagon"
%          "Disk:2.9"  => "Square:2"
%          "Disk:3.5"  => "Octagon:3"
%          "Disk:4.5"  => "Octagon:4"
%          "Disk:5.4"  => "Octagon:5"
%          "Disk:6.4"  => "Octagon:6"
%       All other Disk shapes are unique to this kernel, but because a "Disk"
%       is more circular when using a larger radius, using a larger radius is
%       preferred over iterating the morphological operation.
%
%    Rectangle:{geometry}
%       Simply generate a rectangle of 1's with the size given.
You can also % specify the location of the 'control point', otherwise the closest % pixel to the center of the rectangle is selected. % % Properly centered and odd sized rectangles work the best. % % Symbol Dilation Kernels % % These kernel is not a good general morphological kernel, but is used % more for highlighting and marking any single pixels in an image using, % a "Dilate" method as appropriate. % % For the same reasons iterating these kernels does not produce the % same result as using a larger radius for the symbol. % % Plus:[{radius}[,{scale}]] % Cross:[{radius}[,{scale}]] % Generate a kernel in the shape of a 'plus' or a 'cross' with % a each arm the length of the given radius (default 2). % % NOTE: "plus:1" is equivalent to a "Diamond" kernel. % % Ring:{radius1},{radius2}[,{scale}] % A ring of the values given that falls between the two radii. % Defaults to a ring of approximataly 3 radius in a 7x7 kernel. % This is the 'edge' pixels of the default "Disk" kernel, % More specifically, "Ring" -> "Ring:2.5,3.5,1.0" % % Hit and Miss Kernels % % Peak:radius1,radius2 % Find any peak larger than the pixels the fall between the two radii. % The default ring of pixels is as per "Ring". 
%    Edges
%       Find flat orthogonal edges of a binary shape
%    Corners
%       Find 90 degree corners of a binary shape
%    Diagonals:type
%       A special kernel to thin the 'outside' of diagonals
%    LineEnds:type
%       Find end points of lines (for pruning a skeleton)
%       Two types of line ends (default to both) can be searched for
%         Type 0: All line ends
%         Type 1: single kernel for 4-connected line ends
%         Type 2: single kernel for simple line ends
%    LineJunctions
%       Find three line junctions (within a skeleton)
%         Type 0: all line junctions
%         Type 1: Y Junction kernel
%         Type 2: Diagonal T Junction kernel
%         Type 3: Orthogonal T Junction kernel
%         Type 4: Diagonal X Junction kernel
%         Type 5: Orthogonal + Junction kernel
%    Ridges:type
%       Find single pixel ridges or thin lines
%         Type 1: Find single pixel thick lines and ridges
%         Type 2: Find two pixel thick lines and ridges
%    ConvexHull
%       Octagonal Thickening Kernel, to generate convex hulls of 45 degrees
%    Skeleton:type
%       Traditional skeleton generating kernels.
%         Type 1: Traditional Skeleton kernel (4 connected skeleton)
%         Type 2: HIPR2 Skeleton kernel (8 connected skeleton)
%         Type 3: Thinning skeleton based on a research paper by
%                 Dan S. Bloomberg (Default Type)
%    ThinSE:type
%       A huge variety of Thinning Kernels designed to preserve connectivity.
%       Many other kernel sets use these kernels as source definitions.
%       Type numbers are 41-49, 81-89, 481, and 482 which are based on
%       the super and sub notations used in the source research paper.
%
%  Distance Measuring Kernels
%
%    Different types of distance measuring methods, which are used with the
%    'Distance' morphology method for generating a gradient based on
%    distance from an edge of a binary shape, though there is a technique
%    for handling an anti-aliased shape.
%
%    See the 'Distance' Morphological Method, for information of how it is
%    applied.
% % Chebyshev:[{radius}][x{scale}[%!]] % Chebyshev Distance (also known as Tchebychev or Chessboard distance) % is a value of one to any neighbour, orthogonal or diagonal. One why % of thinking of it is the number of squares a 'King' or 'Queen' in % chess needs to traverse reach any other position on a chess board. % It results in a 'square' like distance function, but one where % diagonals are given a value that is closer than expected. % % Manhattan:[{radius}][x{scale}[%!]] % Manhattan Distance (also known as Rectilinear, City Block, or the Taxi % Cab distance metric), it is the distance needed when you can only % travel in horizontal or vertical directions only. It is the % distance a 'Rook' in chess would have to travel, and results in a % diamond like distances, where diagonals are further than expected. % % Octagonal:[{radius}][x{scale}[%!]] % An interleving of Manhatten and Chebyshev metrics producing an % increasing octagonally shaped distance. Distances matches those of % the "Octagon" shaped kernel of the same radius. The minimum radius % and default is 2, producing a 5x5 kernel. % % Euclidean:[{radius}][x{scale}[%!]] % Euclidean distance is the 'direct' or 'as the crow flys' distance. % However by default the kernel size only has a radius of 1, which % limits the distance to 'Knight' like moves, with only orthogonal and % diagonal measurements being correct. As such for the default kernel % you will get octagonal like distance function. % % However using a larger radius such as "Euclidean:4" you will get a % much smoother distance gradient from the edge of the shape. Especially % if the image is pre-processed to include any anti-aliasing pixels. % Of course a larger kernel is slower to use, and not always needed. % % The first three Distance Measuring Kernels will only generate distances % of exact multiples of {scale} in binary images. As such you can use a % scale of 1 without loosing any information. 
However you also need some % scaling when handling non-binary anti-aliased shapes. % % The "Euclidean" Distance Kernel however does generate a non-integer % fractional results, and as such scaling is vital even for binary shapes. % */ MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type, const GeometryInfo *args,ExceptionInfo *exception) { KernelInfo *kernel; register ssize_t i; register ssize_t u, v; double nan = sqrt((double)-1.0); /* Special Value : Not A Number */ /* Generate a new empty kernel if needed */ kernel=(KernelInfo *) NULL; switch(type) { case UndefinedKernel: /* These should not call this function */ case UserDefinedKernel: assert("Should not call this function" != (char *) NULL); break; case LaplacianKernel: /* Named Descrete Convolution Kernels */ case SobelKernel: /* these are defined using other kernels */ case RobertsKernel: case PrewittKernel: case CompassKernel: case KirschKernel: case FreiChenKernel: case EdgesKernel: /* Hit and Miss kernels */ case CornersKernel: case DiagonalsKernel: case LineEndsKernel: case LineJunctionsKernel: case RidgesKernel: case ConvexHullKernel: case SkeletonKernel: case ThinSEKernel: break; /* A pre-generated kernel is not needed */ #if 0 /* set to 1 to do a compile-time check that we haven't missed anything */ case UnityKernel: case GaussianKernel: case DoGKernel: case LoGKernel: case BlurKernel: case CometKernel: case BinomialKernel: case DiamondKernel: case SquareKernel: case RectangleKernel: case OctagonKernel: case DiskKernel: case PlusKernel: case CrossKernel: case RingKernel: case PeaksKernel: case ChebyshevKernel: case ManhattanKernel: case OctangonalKernel: case EuclideanKernel: #else default: #endif /* Generate the base Kernel Structure */ kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel)); if (kernel == (KernelInfo *) NULL) return(kernel); (void) memset(kernel,0,sizeof(*kernel)); kernel->minimum = kernel->maximum = kernel->angle = 0.0; kernel->negative_range = 
kernel->positive_range = 0.0; kernel->type = type; kernel->next = (KernelInfo *) NULL; kernel->signature=MagickCoreSignature; break; } switch(type) { /* Convolution Kernels */ case UnityKernel: { kernel->height = kernel->width = (size_t) 1; kernel->x = kernel->y = (ssize_t) 0; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(1,sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); kernel->maximum = kernel->values[0] = args->rho; break; } break; case GaussianKernel: case DoGKernel: case LoGKernel: { double sigma = fabs(args->sigma), sigma2 = fabs(args->xi), A, B, R; if ( args->rho >= 1.0 ) kernel->width = (size_t)args->rho*2+1; else if ( (type != DoGKernel) || (sigma >= sigma2) ) kernel->width = GetOptimalKernelWidth2D(args->rho,sigma); else kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2); kernel->height = kernel->width; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* WARNING: The following generates a 'sampled gaussian' kernel. * What we really want is a 'discrete gaussian' kernel. 
* * How to do this is I don't know, but appears to be basied on the * Error Function 'erf()' (intergral of a gaussian) */ if ( type == GaussianKernel || type == DoGKernel ) { /* Calculate a Gaussian, OR positive half of a DoG */ if ( sigma > MagickEpsilon ) { A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ B = (double) (1.0/(Magick2PI*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B; } else /* limiting case - a unity (normalized Dirac) kernel */ { (void) memset(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } } if ( type == DoGKernel ) { /* Subtract a Negative Gaussian for "Difference of Gaussian" */ if ( sigma2 > MagickEpsilon ) { sigma = sigma2; /* simplify loop expressions */ A = 1.0/(2.0*sigma*sigma); B = (double) (1.0/(Magick2PI*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B; } else /* limiting case - a unity (normalized Dirac) kernel */ kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0; } if ( type == LoGKernel ) { /* Calculate a Laplacian of a Gaussian - Or Mexician Hat */ if ( sigma > MagickEpsilon ) { A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { R = ((double)(u*u+v*v))*A; kernel->values[i] = (1-R)*exp(-R)*B; } } else /* special case - generate a unity kernel */ { (void) memset(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } } /* Note the above kernels may have been 'clipped' by a user defined ** radius, producing a smaller 
(darker) kernel. Also for very small ** sigma's (> 0.1) the central value becomes larger than one, and thus ** producing a very bright kernel. ** ** Normalization will still be needed. */ /* Normalize the 2D Gaussian Kernel ** ** NB: a CorrelateNormalize performs a normal Normalize if ** there are no negative values. */ CalcKernelMetaData(kernel); /* the other kernel meta-data */ ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue); break; } case BlurKernel: { double sigma = fabs(args->sigma), alpha, beta; if ( args->rho >= 1.0 ) kernel->width = (size_t)args->rho*2+1; else kernel->width = GetOptimalKernelWidth1D(args->rho,sigma); kernel->height = 1; kernel->x = (ssize_t) (kernel->width-1)/2; kernel->y = 0; kernel->negative_range = kernel->positive_range = 0.0; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); #if 1 #define KernelRank 3 /* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix). ** It generates a gaussian 3 times the width, and compresses it into ** the expected range. This produces a closer normalization of the ** resulting kernel, especially for very low sigma values. ** As such while wierd it is prefered. ** ** I am told this method originally came from Photoshop. ** ** A properly normalized curve is generated (apart from edge clipping) ** even though we later normalize the result (for edge clipping) ** to allow the correct generation of a "Difference of Blurs". 
*/ /* initialize */ v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */ (void) memset(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); /* Calculate a Positive 1D Gaussian */ if ( sigma > MagickEpsilon ) { sigma *= KernelRank; /* simplify loop expressions */ alpha = 1.0/(2.0*sigma*sigma); beta= (double) (1.0/(MagickSQ2PI*sigma )); for ( u=-v; u <= v; u++) { kernel->values[(u+v)/KernelRank] += exp(-((double)(u*u))*alpha)*beta; } } else /* special case - generate a unity kernel */ kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; #else /* Direct calculation without curve averaging This is equivelent to a KernelRank of 1 */ /* Calculate a Positive Gaussian */ if ( sigma > MagickEpsilon ) { alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ beta = 1.0/(MagickSQ2PI*sigma); for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = exp(-((double)(u*u))*alpha)*beta; } else /* special case - generate a unity kernel */ { (void) memset(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } #endif /* Note the above kernel may have been 'clipped' by a user defined ** radius, producing a smaller (darker) kernel. Also for very small ** sigma's (> 0.1) the central value becomes larger than one, as a ** result of not generating a actual 'discrete' kernel, and thus ** producing a very bright 'impulse'. ** ** Becuase of these two factors Normalization is required! */ /* Normalize the 1D Gaussian Kernel ** ** NB: a CorrelateNormalize performs a normal Normalize if ** there are no negative values. 
*/ CalcKernelMetaData(kernel); /* the other kernel meta-data */ ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue); /* rotate the 1D kernel by given angle */ RotateKernelInfo(kernel, args->xi ); break; } case CometKernel: { double sigma = fabs(args->sigma), A; if ( args->rho < 1.0 ) kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1; else kernel->width = (size_t)args->rho; kernel->x = kernel->y = 0; kernel->height = 1; kernel->negative_range = kernel->positive_range = 0.0; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* A comet blur is half a 1D gaussian curve, so that the object is ** blurred in one direction only. This may not be quite the right ** curve to use so may change in the future. The function must be ** normalised after generation, which also resolves any clipping. ** ** As we are normalizing and not subtracting gaussians, ** there is no need for a divisor in the gaussian formula ** ** It is less comples */ if ( sigma > MagickEpsilon ) { #if 1 #define KernelRank 3 v = (ssize_t) kernel->width*KernelRank; /* start/end points */ (void) memset(kernel->values,0, (size_t) kernel->width*sizeof(*kernel->values)); sigma *= KernelRank; /* simplify the loop expression */ A = 1.0/(2.0*sigma*sigma); /* B = 1.0/(MagickSQ2PI*sigma); */ for ( u=0; u < v; u++) { kernel->values[u/KernelRank] += exp(-((double)(u*u))*A); /* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */ } for (i=0; i < (ssize_t) kernel->width; i++) kernel->positive_range += kernel->values[i]; #else A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */ /* B = 1.0/(MagickSQ2PI*sigma); */ for ( i=0; i < (ssize_t) kernel->width; i++) kernel->positive_range += kernel->values[i] = exp(-((double)(i*i))*A); /* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */ #endif } else /* special case - 
generate a unity kernel */ { (void) memset(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; kernel->positive_range = 1.0; } kernel->minimum = 0.0; kernel->maximum = kernel->values[0]; kernel->negative_range = 0.0; ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */ RotateKernelInfo(kernel, args->xi); /* Rotate by angle */ break; } case BinomialKernel: { size_t order_f; if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; order_f = fact(kernel->width-1); kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values within diamond area to scale given */ for ( i=0, v=0; v < (ssize_t)kernel->height; v++) { size_t alpha = order_f / ( fact((size_t) v) * fact(kernel->height-v-1) ); for ( u=0; u < (ssize_t)kernel->width; u++, i++) kernel->positive_range += kernel->values[i] = (double) (alpha * order_f / ( fact((size_t) u) * fact(kernel->height-u-1) )); } kernel->minimum = 1.0; kernel->maximum = kernel->values[kernel->x+kernel->y*kernel->width]; kernel->negative_range = 0.0; break; } /* Convolution Kernels - Well Known Named Constant Kernels */ case LaplacianKernel: { switch ( (int) args->rho ) { case 0: default: /* laplacian square filter -- default */ kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1"); break; case 1: /* laplacian diamond filter */ kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0"); break; case 2: kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2"); break; case 3: kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1"); break; case 5: /* a 5x5 laplacian */ kernel=ParseKernelArray( "5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 
-4,-1,0,-1,-4"); break; case 7: /* a 7x7 laplacian */ kernel=ParseKernelArray( "7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" ); break; case 15: /* a 5x5 LoG (sigma approx 1.4) */ kernel=ParseKernelArray( "5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0"); break; case 19: /* a 9x9 LoG (sigma approx 1.4) */ /* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */ kernel=ParseKernelArray( "9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; break; } case SobelKernel: { /* Simple Sobel Kernel */ kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case RobertsKernel: { kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case PrewittKernel: { kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case CompassKernel: { kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case KirschKernel: { kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case FreiChenKernel: /* Direction is set to be left to right positive */ /* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? 
*/ /* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */ { switch ( (int) args->rho ) { default: case 0: kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[3] = +(MagickRealType) MagickSQ2; kernel->values[5] = -(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ break; case 2: kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[1] = kernel->values[3]= +(MagickRealType) MagickSQ2; kernel->values[5] = kernel->values[7]= -(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 10: { kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19",exception); if (kernel == (KernelInfo *) NULL) return(kernel); break; } case 1: case 11: kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[3] = +(MagickRealType) MagickSQ2; kernel->values[5] = -(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 12: kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[1] = +(MagickRealType) MagickSQ2; kernel->values[7] = +(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 13: kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[0] = +(MagickRealType) MagickSQ2; kernel->values[8] = -(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, 
(double) (1.0/2.0*MagickSQ2), NoValue); break; case 14: kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[2] = -(MagickRealType) MagickSQ2; kernel->values[6] = +(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 15: kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/2.0, NoValue); break; case 16: kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/2.0, NoValue); break; case 17: kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/6.0, NoValue); break; case 18: kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/6.0, NoValue); break; case 19: kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/3.0, NoValue); break; } if ( fabs(args->sigma) >= MagickEpsilon ) /* Rotate by correctly supplied 'angle' */ RotateKernelInfo(kernel, args->sigma); else if ( args->rho > 30.0 || args->rho < -30.0 ) /* Rotate by out of bounds 'type' */ RotateKernelInfo(kernel, args->rho); break; } /* Boolean or Shaped Kernels */ case DiamondKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) 
return(DestroyKernelInfo(kernel)); /* set all kernel values within diamond area to scale given */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x) kernel->positive_range += kernel->values[i] = args->sigma; else kernel->values[i] = nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case SquareKernel: case RectangleKernel: { double scale; if ( type == SquareKernel ) { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = (size_t) (2*args->rho+1); kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; scale = args->sigma; } else { /* NOTE: user defaults set in "AcquireKernelInfo()" */ if ( args->rho < 1.0 || args->sigma < 1.0 ) return(DestroyKernelInfo(kernel)); /* invalid args given */ kernel->width = (size_t)args->rho; kernel->height = (size_t)args->sigma; if ( args->xi < 0.0 || args->xi > (double)kernel->width || args->psi < 0.0 || args->psi > (double)kernel->height ) return(DestroyKernelInfo(kernel)); /* invalid args given */ kernel->x = (ssize_t) args->xi; kernel->y = (ssize_t) args->psi; scale = 1.0; } kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values to scale given */ u=(ssize_t) (kernel->width*kernel->height); for ( i=0; i < u; i++) kernel->values[i] = scale; kernel->minimum = kernel->maximum = scale; /* a flat shape */ kernel->positive_range = scale*u; break; } case OctagonKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius = 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( 
AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ( (labs((long) u)+labs((long) v)) <= ((long)kernel->x + (long)(kernel->x/2)) ) kernel->positive_range += kernel->values[i] = args->sigma; else kernel->values[i] = nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case DiskKernel: { ssize_t limit = (ssize_t)(args->rho*args->rho); if (args->rho < 0.4) /* default radius approx 4.3 */ kernel->width = kernel->height = 9L, limit = 18L; else kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ((u*u+v*v) <= limit) kernel->positive_range += kernel->values[i] = args->sigma; else kernel->values[i] = nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case PlusKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values along axises to given scale */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = (u == 0 || v == 0) ? 
args->sigma : nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0); break; } case CrossKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values along axises to given scale */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = (u == v || u == -v) ? args->sigma : nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0); break; } /* HitAndMiss Kernels */ case RingKernel: case PeaksKernel: { ssize_t limit1, limit2, scale; if (args->rho < args->sigma) { kernel->width = ((size_t)args->sigma)*2+1; limit1 = (ssize_t)(args->rho*args->rho); limit2 = (ssize_t)(args->sigma*args->sigma); } else { kernel->width = ((size_t)args->rho)*2+1; limit1 = (ssize_t)(args->sigma*args->sigma); limit2 = (ssize_t)(args->rho*args->rho); } if ( limit2 <= 0 ) kernel->width = 7L, limit1 = 7L, limit2 = 11L; kernel->height = kernel->width; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */ scale = (ssize_t) (( type == PeaksKernel) ? 
0.0 : args->xi); for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { ssize_t radius=u*u+v*v; if (limit1 < radius && radius <= limit2) kernel->positive_range += kernel->values[i] = (double) scale; else kernel->values[i] = nan; } kernel->minimum = kernel->maximum = (double) scale; if ( type == PeaksKernel ) { /* set the central point in the middle */ kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; kernel->positive_range = 1.0; kernel->maximum = 1.0; } break; } case EdgesKernel: { kernel=AcquireKernelInfo("ThinSE:482",exception); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */ break; } case CornersKernel: { kernel=AcquireKernelInfo("ThinSE:87",exception); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */ break; } case DiagonalsKernel: { switch ( (int) args->rho ) { case 0: default: { KernelInfo *new_kernel; kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; ExpandMirrorKernelInfo(kernel); return(kernel); } case 1: kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-"); break; case 2: kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case LineEndsKernel: { /* Kernels for finding the end of thin lines */ switch ( (int) args->rho ) { case 0: default: /* set of kernels to find all end of lines */ return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>",exception)); case 1: /* kernel for 4-connected line ends - no rotation */ 
kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-"); break; case 2: /* kernel to add for 8-connected lines - no rotation */ kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1"); break; case 3: /* kernel to add for orthogonal line ends - does not find corners */ kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0"); break; case 4: /* traditional line end - fails on last T end */ kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case LineJunctionsKernel: { /* kernels for finding the junctions of multiple lines */ switch ( (int) args->rho ) { case 0: default: /* set of kernels to find all line junctions */ return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>",exception)); case 1: /* Y Junction */ kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-"); break; case 2: /* Diagonal T Junctions */ kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1"); break; case 3: /* Orthogonal T Junctions */ kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-"); break; case 4: /* Diagonal X Junctions */ kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1"); break; case 5: /* Orthogonal X Junctions - minimal diamond kernel */ kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case RidgesKernel: { /* Ridges - Ridge finding kernels */ KernelInfo *new_kernel; switch ( (int) args->rho ) { case 1: default: kernel=ParseKernelArray("3x1:0,1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */ break; case 2: kernel=ParseKernelArray("4x1:0,1,1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */ /* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */ /* Unfortunatally we can not yet 
rotate a non-square kernel */ /* But then we can't flip a non-symetrical kernel either */ new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; break; } break; } case ConvexHullKernel: { KernelInfo *new_kernel; /* first set of 8 kernels */ kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 
append the mirror versions too - no flip function yet */ new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; ExpandRotateKernelInfo(new_kernel, 90.0); LastKernelInfo(kernel)->next = new_kernel; break; } case SkeletonKernel: { switch ( (int) args->rho ) { case 1: default: /* Traditional Skeleton... ** A cyclically rotated single kernel */ kernel=AcquireKernelInfo("ThinSE:482",exception); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */ break; case 2: /* HIPR Variation of the cyclic skeleton ** Corners of the traditional method made more forgiving, ** but the retain the same cyclic order. */ kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;",exception); if (kernel == (KernelInfo *) NULL) return(kernel); if (kernel->next == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); kernel->type = type; kernel->next->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */ break; case 3: /* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's ** "Connectivity-Preserving Morphological Image Thransformations" ** by Dan S. Bloomberg, available on Leptonica, Selected Papers, ** http://www.leptonica.com/papers/conn.pdf */ kernel=AcquireKernelInfo("ThinSE:41; ThinSE:42; ThinSE:43", exception); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->next->type = type; kernel->next->next->type = type; ExpandMirrorKernelInfo(kernel); /* 12 kernels total */ break; } break; } case ThinSEKernel: { /* Special kernels for general thinning, while preserving connections ** "Connectivity-Preserving Morphological Image Thransformations" ** by Dan S. 
Bloomberg, available on Leptonica, Selected Papers, ** http://www.leptonica.com/papers/conn.pdf ** And ** http://tpgit.github.com/Leptonica/ccthin_8c_source.html ** ** Note kernels do not specify the origin pixel, allowing them ** to be used for both thickening and thinning operations. */ switch ( (int) args->rho ) { /* SE for 4-connected thinning */ case 41: /* SE_4_1 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1"); break; case 42: /* SE_4_2 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-"); break; case 43: /* SE_4_3 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1"); break; case 44: /* SE_4_4 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-"); break; case 45: /* SE_4_5 */ kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-"); break; case 46: /* SE_4_6 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1"); break; case 47: /* SE_4_7 */ kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-"); break; case 48: /* SE_4_8 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1"); break; case 49: /* SE_4_9 */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1"); break; /* SE for 8-connected thinning - negatives of the above */ case 81: /* SE_8_0 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-"); break; case 82: /* SE_8_2 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-"); break; case 83: /* SE_8_3 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-"); break; case 84: /* SE_8_4 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-"); break; case 85: /* SE_8_5 */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-"); break; case 86: /* SE_8_6 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1"); break; case 87: /* SE_8_7 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-"); break; case 88: /* SE_8_8 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-"); break; case 89: /* SE_8_9 */ kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-"); break; /* Special combined SE kernels */ case 423: /* SE_4_2 , SE_4_3 Combined Kernel */ kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-"); break; case 823: /* SE_8_2 , SE_8_3 Combined Kernel 
*/ kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-"); break; case 481: /* SE_48_1 - General Connected Corner Kernel */ kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-"); break; default: case 482: /* SE_48_2 - General Edge Kernel */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } /* Distance Measuring Kernels */ case ChebyshevKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*MagickMax(fabs((double)u),fabs((double)v)) ); kernel->maximum = kernel->values[0]; break; } case ManhattanKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*(labs((long) u)+labs((long) v)) ); kernel->maximum = kernel->values[0]; break; } case OctagonalKernel: { if (args->rho < 2.0) kernel->width = kernel->height = 5; /* default/minimum radius = 2 */ else kernel->width = kernel->height = 
((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { double r1 = MagickMax(fabs((double)u),fabs((double)v)), r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5); kernel->positive_range += kernel->values[i] = args->sigma*MagickMax(r1,r2); } kernel->maximum = kernel->values[0]; break; } case EuclideanKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*sqrt((double)(u*u+v*v)) ); kernel->maximum = kernel->values[0]; break; } default: { /* No-Op Kernel - Basically just a single pixel on its own */ kernel=ParseKernelArray("1:1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = UndefinedKernel; break; } break; } return(kernel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneKernelInfo() creates a new clone of the given Kernel List so that its % can be modified without effecting the original. The cloned kernel should % be destroyed using DestoryKernelInfo() when no longer needed. 
%
%  The format of the CloneKernelInfo method is:
%
%      KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel to be cloned
%
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
  register ssize_t
    i;

  KernelInfo
    *new_kernel;

  assert(kernel != (KernelInfo *) NULL);
  new_kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (new_kernel == (KernelInfo *) NULL)
    return(new_kernel);
  *new_kernel=(*kernel); /* copy values in structure */
  /*
    The struct copy above aliased the original's 'values' and 'next'
    pointers.  Clear them immediately: otherwise a failure below would hand
    the clone to DestroyKernelInfo(), which would recursively free the
    CALLER's kernel list (and not just this half-built clone).
  */
  new_kernel->next=(KernelInfo *) NULL;
  new_kernel->values=(MagickRealType *) NULL;
  /* replace the values with a copy of the values */
  new_kernel->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel->width,kernel->height*sizeof(*kernel->values)));
  if (new_kernel->values == (MagickRealType *) NULL)
    return(DestroyKernelInfo(new_kernel));
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    new_kernel->values[i]=kernel->values[i];
  /* Also clone the next kernel in the kernel list */
  if ( kernel->next != (KernelInfo *) NULL ) {
    new_kernel->next = CloneKernelInfo(kernel->next);
    if ( new_kernel->next == (KernelInfo *) NULL )
      return(DestroyKernelInfo(new_kernel));
  }
  return(new_kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s t r o y K e r n e l I n f o                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyKernelInfo() frees the memory used by a Convolution/Morphology
%  kernel.
%
%  The format of the DestroyKernelInfo method is:
%
%      KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel to be destroyed
%
*/
MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
{
  assert(kernel != (KernelInfo *) NULL);
  /* Recursively destroy the remainder of the kernel list first. */
  if (kernel->next != (KernelInfo *) NULL)
    kernel->next=DestroyKernelInfo(kernel->next);
  /* 'values' is acquired with AcquireAlignedMemory() elsewhere in this
     module, so it must be released with the matching aligned relinquish. */
  kernel->values=(MagickRealType *) RelinquishAlignedMemory(kernel->values);
  kernel=(KernelInfo *) RelinquishMagickMemory(kernel);
  /* Returns the relinquished pointer (presumably NULL, per the
     Relinquish* convention) so callers can clear their own reference in
     one statement, e.g. kernel=DestroyKernelInfo(kernel). */
  return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     E x p a n d M i r r o r K e r n e l I n f o                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExpandMirrorKernelInfo() takes a single kernel, and expands it into a
%  sequence of 90-degree rotated kernels but providing a reflected 180
%  rotation, before the -/+ 90-degree rotations.
%
%  This special rotation order produces a better, more symmetrical thinning of
%  objects.
%
%  The format of the ExpandMirrorKernelInfo method is:
%
%      void ExpandMirrorKernelInfo(KernelInfo *kernel)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
%  This function is only internal to this module, as it is not finalized,
%  especially with regard to non-orthogonal angles, and rotation of larger
%  2D kernels.
*/

/*
  Disabled helper: reverses each row of the kernel in place (a "flop").
  NOTE(review): the last statement references 'angle', which is not in
  scope here -- this code cannot compile as-is, which is presumably why it
  is fenced off with #if 0.
*/
#if 0
static void FlopKernelInfo(KernelInfo *kernel)
{ /* Do a Flop by reversing each row. */
  size_t
    y;
  register ssize_t
    x,r;
  register double
    *k,t;

  for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width)
    for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--)
      t=k[x], k[x]=k[r], k[r]=t;

  kernel->x = kernel->width - kernel->x - 1;
  angle = fmod(angle+180.0, 360.0);
}
#endif

/*
  Append three rotated clones of the given kernel to its list, in the
  order: 180 (flip), 90 (transpose), 180 (flop).  On a clone failure the
  function returns quietly, leaving the list partially expanded.
*/
static void ExpandMirrorKernelInfo(KernelInfo *kernel)
{
  KernelInfo
    *clone,
    *last;

  last = kernel;

  clone = CloneKernelInfo(last);
  if (clone == (KernelInfo *) NULL)
    return;
  RotateKernelInfo(clone, 180);   /* flip */
  LastKernelInfo(last)->next = clone;
  last = clone;

  clone = CloneKernelInfo(last);
  if (clone == (KernelInfo *) NULL)
    return;
  RotateKernelInfo(clone, 90);    /* transpose */
  LastKernelInfo(last)->next = clone;
  last = clone;

  clone = CloneKernelInfo(last);
  if (clone == (KernelInfo *) NULL)
    return;
  RotateKernelInfo(clone, 180);   /* flop */
  LastKernelInfo(last)->next = clone;

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     E x p a n d R o t a t e K e r n e l I n f o                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating
%  incrementally by the angle given, until the kernel repeats.
%
%  WARNING: 45 degree rotations only works for 3x3 kernels.
%  While 90 degree rotations only works for linear and square kernels
%
%  The format of the ExpandRotateKernelInfo method is:
%
%      void ExpandRotateKernelInfo(KernelInfo *kernel, double angle)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
%    o angle: angle to rotate in degrees
%
%  This function is only internal to this module, as it is not finalized,
%  especially with regard to non-orthogonal angles, and rotation of larger
%  2D kernels.
*/

/* Internal Routine - Return true if two kernels are the same */
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
  const KernelInfo *kernel2)
{
  register size_t
    i;

  /* Geometry must agree: size and origin location. */
  if (kernel1->width != kernel2->width)
    return MagickFalse;
  if (kernel1->height != kernel2->height)
    return MagickFalse;
  if (kernel1->x != kernel2->x)
    return MagickFalse;
  if (kernel1->y != kernel2->y)
    return MagickFalse;
  /* Every cell must agree; a NaN cell only matches another NaN cell. */
  for (i=0; i < (kernel1->width*kernel1->height); i++)
  {
    const int
      nan1 = IsNaN(kernel1->values[i]) ? 1 : 0,
      nan2 = IsNaN(kernel2->values[i]) ? 1 : 0;

    if (nan1 != nan2)
      return MagickFalse;
    /* NaN==NaN: fabs(NaN-NaN) >= MagickEpsilon is false, so both-NaN
       cells fall through here as equal, matching the geometry intent. */
    if ( fabs(kernel1->values[i] - kernel2->values[i]) >= MagickEpsilon )
      return MagickFalse;
  }
  return MagickTrue;
}

/*
  Grow the kernel list by repeatedly cloning the last kernel and rotating
  the clone by 'angle', stopping when the rotation cycles back to the
  first kernel (or a clone fails).  The final, repeated clone is junked.
*/
static void ExpandRotateKernelInfo(KernelInfo *kernel,const double angle)
{
  KernelInfo
    *clone_info,
    *last;

  clone_info=(KernelInfo *) NULL;
  last=kernel;
  DisableMSCWarning(4127)
  for ( ; ; )
  {
  RestoreMSCWarning
    clone_info=CloneKernelInfo(last);
    if (clone_info == (KernelInfo *) NULL)
      break;
    RotateKernelInfo(clone_info,angle);
    if (SameKernelInfo(kernel,clone_info) != MagickFalse)
      break;
    LastKernelInfo(last)->next=clone_info;
    last=clone_info;
  }
  if (clone_info != (KernelInfo *) NULL)
    clone_info=DestroyKernelInfo(clone_info);  /* kernel repeated - junk */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     C a l c M e t a K e r n a l I n f o                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CalcKernelMetaData() recalculates the KernelInfo meta-data of this kernel
%  only, using the kernel values.  This should only be used if it is not
%  possible to calculate that meta-data in some easier way.
%
%  It is important that the meta-data is correct before ScaleKernelInfo() is
%  used to perform kernel normalization.
%
%  The format of the CalcKernelMetaData method is:
%
%      void CalcKernelMetaData(KernelInfo *kernel)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel to modify
%
%  WARNING: Minimum and Maximum values are assumed to include zero, even if
%  zero is not part of the kernel (as in Gaussian Derived kernels).  This
%  however is not true for flat-shaped morphological kernels.
%
%  WARNING: Only the specific kernel pointed to is modified, not a list of
%  multiple kernels.
%
%  This is an internal function and not expected to be useful outside this
%  module.  This could change however.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
  register size_t
    n;

  /* Reset statistics before re-accumulating from the kernel values. */
  kernel->minimum = kernel->maximum = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  for (n=0; n < (kernel->width*kernel->height); n++)
  {
    /* Snap effectively-zero cells to exactly zero (mutates the kernel). */
    if ( fabs(kernel->values[n]) < MagickEpsilon )
      kernel->values[n] = 0.0;
    /* Accumulate the negative and positive value sums separately. */
    if ( kernel->values[n] < 0 )
      kernel->negative_range += kernel->values[n];
    else
      kernel->positive_range += kernel->values[n];
    Minimize(kernel->minimum, kernel->values[n]);
    Maximize(kernel->maximum, kernel->values[n]);
  }

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o r p h o l o g y   A p p l y                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MorphologyApply() applies a morphological method, multiple times using
%  a list of multiple kernels.  This is the method that should be called by
%  other 'operators' that internally use morphology operations as part of
%  their processing.
%
%  It is basically equivalent to MorphologyImage() (see below) but without
%  any user controls.  This allows internal programs to use this method to
%  perform a specific task without possible interference by any API user
%  supplied settings.
%
%  It is MorphologyImage()'s task to extract any such user controls, and
%  pass them to this function for processing.
%
%  More specifically, all given kernels should already be scaled,
%  normalised, and blended appropriately before being passed to this
%  routine, with the appropriate bias and compose method (typically
%  'UndefinedComposeOp') given.
%
%  The format of the MorphologyApply method is:
%
%      Image *MorphologyApply(const Image *image,MorphologyMethod method,
%        const ssize_t iterations,const KernelInfo *kernel,
%        const CompositeMethod compose,const double bias,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the source image
%
%    o method: the morphology method to be applied.
%
%    o iterations: apply the operation this many times (or no change).
%        A value of -1 means loop until no change found.
%        How this is applied may depend on the morphology method.
%        Typically this is a value of 1.
%
%    o channel: the channel type.
%
%    o kernel: An array of double representing the morphology kernel.
%
%    o compose: How to handle or merge multi-kernel results.
%        If 'UndefinedCompositeOp' use default for the Morphology method.
%        If 'NoCompositeOp' force image to be re-iterated by each kernel.
%        Otherwise merge the results using the compose method given.
%
%    o bias: Convolution Output Bias.
%
%    o exception: return any errors or warnings in this structure.
% */ static ssize_t MorphologyPrimitive(const Image *image,Image *morphology_image, const MorphologyMethod method,const KernelInfo *kernel,const double bias, ExceptionInfo *exception) { #define MorphologyTag "Morphology/Image" CacheView *image_view, *morphology_view; OffsetInfo offset; register ssize_t j, y; size_t *changes, changed, width; MagickBooleanType status; MagickOffsetType progress; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(morphology_image != (Image *) NULL); assert(morphology_image->signature == MagickCoreSignature); assert(kernel != (KernelInfo *) NULL); assert(kernel->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); morphology_view=AcquireAuthenticCacheView(morphology_image,exception); width=image->columns+kernel->width-1; offset.x=0; offset.y=0; switch (method) { case ConvolveMorphology: case DilateMorphology: case DilateIntensityMorphology: case IterativeDistanceMorphology: { /* Kernel needs to used with reflection about origin. */ offset.x=(ssize_t) kernel->width-kernel->x-1; offset.y=(ssize_t) kernel->height-kernel->y-1; break; } case ErodeMorphology: case ErodeIntensityMorphology: case HitAndMissMorphology: case ThinningMorphology: case ThickenMorphology: { offset.x=kernel->x; offset.y=kernel->y; break; } default: { assert("Not a Primitive Morphology Method" != (char *) NULL); break; } } changed=0; changes=(size_t *) AcquireQuantumMemory(GetOpenMPMaximumThreads(), sizeof(*changes)); if (changes == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++) changes[j]=0; if ((method == ConvolveMorphology) && (kernel->width == 1)) { register ssize_t x; /* Special handling (for speed) of vertical (blur) kernels. 
This performs its handling in columns rather than in rows. This is only done for convolve as it is the only method that generates very large 1-D vertical kernels (such as a 'BlurKernel') */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,morphology_image,image->columns,1) #endif for (x=0; x < (ssize_t) image->columns; x++) { const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t r; ssize_t center; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,x,-offset.y,1,image->rows+ kernel->height-1,exception); q=GetCacheViewAuthenticPixels(morphology_view,x,0,1, morphology_image->rows,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } center=(ssize_t) GetPixelChannels(image)*offset.y; for (r=0; r < (ssize_t) image->rows; r++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait morphology_traits, traits; register const MagickRealType *magick_restrict k; register const Quantum *magick_restrict pixels; register ssize_t v; size_t count; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); morphology_traits=GetPixelChannelTraits(morphology_image,channel); if ((traits == UndefinedPixelTrait) || (morphology_traits == UndefinedPixelTrait)) continue; if ((traits & CopyPixelTrait) != 0) { SetPixelChannel(morphology_image,channel,p[center+i],q); continue; } k=(&kernel->values[kernel->height-1]); pixels=p; pixel=bias; gamma=0.0; count=0; if (((image->alpha_trait & BlendPixelTrait) == 0) || ((morphology_traits & BlendPixelTrait) == 0)) for (v=0; v < (ssize_t) kernel->height; v++) { if (!IsNaN(*k)) { pixel+=(*k)*pixels[i]; gamma+=(*k); count++; } k--; pixels+=GetPixelChannels(image); } else for (v=0; v < (ssize_t) 
kernel->height; v++) { if (!IsNaN(*k)) { alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels)); pixel+=alpha*(*k)*pixels[i]; gamma+=alpha*(*k); count++; } k--; pixels+=GetPixelChannels(image); } if (fabs(pixel-p[center+i]) > MagickEpsilon) changes[id]++; gamma=PerceptibleReciprocal(gamma); if (count != 0) gamma*=(double) kernel->height/count; SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma* pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(morphology_image); } if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,MorphologyTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } morphology_image->type=image->type; morphology_view=DestroyCacheView(morphology_view); image_view=DestroyCacheView(image_view); for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++) changed+=changes[j]; changes=(size_t *) RelinquishMagickMemory(changes); return(status ? (ssize_t) changed : 0); } /* Normal handling of horizontal or rectangular kernels (row by row). 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,morphology_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; ssize_t center; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width, kernel->height,exception); q=GetCacheViewAuthenticPixels(morphology_view,0,y,morphology_image->columns, 1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } center=(ssize_t) (GetPixelChannels(image)*width*offset.y+ GetPixelChannels(image)*offset.x); for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, intensity, maximum, minimum, pixel; PixelChannel channel; PixelTrait morphology_traits, traits; register const MagickRealType *magick_restrict k; register const Quantum *magick_restrict pixels, *magick_restrict quantum_pixels; register ssize_t u; size_t count; ssize_t v; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); morphology_traits=GetPixelChannelTraits(morphology_image,channel); if ((traits == UndefinedPixelTrait) || (morphology_traits == UndefinedPixelTrait)) continue; if ((traits & CopyPixelTrait) != 0) { SetPixelChannel(morphology_image,channel,p[center+i],q); continue; } pixels=p; quantum_pixels=(const Quantum *) NULL; maximum=0.0; minimum=(double) QuantumRange; switch (method) { case ConvolveMorphology: { pixel=bias; break; } case DilateMorphology: case ErodeIntensityMorphology: { pixel=0.0; break; } case HitAndMissMorphology: case ErodeMorphology: { pixel=QuantumRange; break; } default: { pixel=(double) p[center+i]; break; } } count=0; gamma=1.0; switch (method) { case ConvolveMorphology: { /* Weighted 
Average of pixels using reflected kernel For correct working of this operation for asymetrical kernels, the kernel needs to be applied in its reflected form. That is its values needs to be reversed. Correlation is actually the same as this but without reflecting the kernel, and thus 'lower-level' that Convolution. However as Convolution is the more common method used, and it does not really cost us much in terms of processing to use a reflected kernel, so it is Convolution that is implemented. Correlation will have its kernel reflected before calling this function to do a Convolve. For more details of Correlation vs Convolution see http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf */ k=(&kernel->values[kernel->width*kernel->height-1]); if (((image->alpha_trait & BlendPixelTrait) == 0) || ((morphology_traits & BlendPixelTrait) == 0)) { /* No alpha blending. */ for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { pixel+=(*k)*pixels[i]; count++; } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } /* Alpha blending. */ gamma=0.0; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels)); pixel+=alpha*(*k)*pixels[i]; gamma+=alpha*(*k); count++; } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case ErodeMorphology: { /* Minimum value within kernel neighbourhood. The kernel is not reflected for this operation. In normal Greyscale Morphology, the kernel value should be added to the real value, this is currently not done, due to the nature of the boolean kernels being used. 
*/ k=kernel->values; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k) && (*k >= 0.5)) { if ((double) pixels[i] < pixel) pixel=(double) pixels[i]; } k++; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case DilateMorphology: { /* Maximum value within kernel neighbourhood. For correct working of this operation for asymetrical kernels, the kernel needs to be applied in its reflected form. That is its values needs to be reversed. In normal Greyscale Morphology, the kernel value should be added to the real value, this is currently not done, due to the nature of the boolean kernels being used. */ k=(&kernel->values[kernel->width*kernel->height-1]); for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k) && (*k > 0.5)) { if ((double) pixels[i] > pixel) pixel=(double) pixels[i]; } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case HitAndMissMorphology: case ThinningMorphology: case ThickenMorphology: { /* Minimum of foreground pixel minus maxumum of background pixels. The kernel is not reflected for this operation, and consists of both foreground and background pixel neighbourhoods, 0.0 for background, and 1.0 for foreground with either Nan or 0.5 values for don't care. This never produces a meaningless negative result. Such results cause Thinning/Thicken to not work correctly when used against a greyscale image. 
*/ k=kernel->values; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { if (*k > 0.7) { if ((double) pixels[i] < pixel) pixel=(double) pixels[i]; } else if (*k < 0.3) { if ((double) pixels[i] > maximum) maximum=(double) pixels[i]; } count++; } k++; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } pixel-=maximum; if (pixel < 0.0) pixel=0.0; if (method == ThinningMorphology) pixel=(double) p[center+i]-pixel; else if (method == ThickenMorphology) pixel+=(double) p[center+i]+pixel; break; } case ErodeIntensityMorphology: { /* Select pixel with minimum intensity within kernel neighbourhood. The kernel is not reflected for this operation. */ k=kernel->values; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k) && (*k >= 0.5)) { intensity=(double) GetPixelIntensity(image,pixels); if (intensity < minimum) { quantum_pixels=pixels; pixel=(double) pixels[i]; minimum=intensity; } count++; } k++; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case DilateIntensityMorphology: { /* Select pixel with maximum intensity within kernel neighbourhood. The kernel is not reflected for this operation. */ k=(&kernel->values[kernel->width*kernel->height-1]); for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k) && (*k >= 0.5)) { intensity=(double) GetPixelIntensity(image,pixels); if (intensity > maximum) { pixel=(double) pixels[i]; quantum_pixels=pixels; maximum=intensity; } count++; } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case IterativeDistanceMorphology: { /* Compute th iterative distance from black edge of a white image shape. Essentually white values are decreased to the smallest 'distance from edge' it can find. 
It works by adding kernel values to the neighbourhood, and and select the minimum value found. The kernel is rotated before use, so kernel distances match resulting distances, when a user provided asymmetric kernel is applied. This code is nearly identical to True GrayScale Morphology but not quite. GreyDilate Kernel values added, maximum value found Kernel is rotated before use. GrayErode: Kernel values subtracted and minimum value found No kernel rotation used. Note the the Iterative Distance method is essentially a GrayErode, but with negative kernel values, and kernel rotation applied. */ k=(&kernel->values[kernel->width*kernel->height-1]); for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { if ((pixels[i]+(*k)) < pixel) pixel=(double) pixels[i]+(*k); count++; } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case UndefinedMorphology: default: break; } if (fabs(pixel-p[center+i]) > MagickEpsilon) changes[id]++; if (quantum_pixels != (const Quantum *) NULL) { SetPixelChannel(morphology_image,channel,quantum_pixels[i],q); continue; } gamma=PerceptibleReciprocal(gamma); if (count != 0) gamma*=(double) kernel->height*kernel->width/count; SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma*pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(morphology_image); } if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,MorphologyTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } morphology_view=DestroyCacheView(morphology_view); image_view=DestroyCacheView(image_view); for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++) changed+=changes[j]; changes=(size_t *) 
RelinquishMagickMemory(changes);
  return(status ? (ssize_t) changed : -1);
}

/*
  This is almost identical to the MorphologyPrimitive() function above, but
  applies the primitive directly to the actual image using two passes, once
  in each direction, with the results of the previous (and current) row
  being re-used.

  That is after each row is 'Sync'ed' into the image, the next row makes
  use of those values as part of the calculation of the next row.  It
  repeats, but going in the opposite (bottom-up) direction.

  Because of this 're-use of results' this function can not make use of
  multi-threaded, parallel processing.

  Returns the number of pixels that changed, or -1 on error.
*/
static ssize_t MorphologyPrimitiveDirect(Image *image,
  const MorphologyMethod method,const KernelInfo *kernel,
  ExceptionInfo *exception)
{
  CacheView
    *morphology_view,
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  OffsetInfo
    offset;

  size_t
    width,
    changed;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=MagickTrue;
  changed=0;
  progress=0;
  switch(method)
  {
    case DistanceMorphology:
    case VoronoiMorphology:
    {
      /*
        Kernel reflected about origin.
      */
      offset.x=(ssize_t) kernel->width-kernel->x-1;
      offset.y=(ssize_t) kernel->height-kernel->y-1;
      break;
    }
    default:
    {
      offset.x=kernel->x;
      offset.y=kernel->y;
      break;
    }
  }
  /*
    Two views into same image, do not thread.
  */
  image_view=AcquireVirtualCacheView(image,exception);
  morphology_view=AcquireAuthenticCacheView(image,exception);
  width=image->columns+kernel->width-1;
  /*
    First pass: top-down through the image.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /*
      Read virtual pixels, and authentic pixels, from the same image!  We
      read using virtual to get virtual pixel handling, but write back into
      the same image.

      Only top half of kernel is processed as we do a single pass downward
      through the image iterating the distance function as we go.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width,(size_t)
      offset.y+1,exception);
    q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelChannel
          channel;

        PixelTrait
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;
        pixels=p;
        pixel=(double) QuantumRange;
        switch (method)
        {
          case DistanceMorphology:
          {
            /* Scan rows of the kernel above (and including) the origin. */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v <= offset.y; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /* Also use the just-written results to the left on this row. */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q-offset.x*GetPixelChannels(image);
            for (u=0; u < offset.x; u++)
            {
              if (!IsNaN(*k) && ((x+u-offset.x) >= 0))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
              pixels+=GetPixelChannels(image);
            }
            break;
          }
          case VoronoiMorphology:
          {
            /* Same as Distance but excludes the kernel origin row. */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < offset.y; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q-offset.x*GetPixelChannels(image);
            for (u=0; u < offset.x; u++)
            {
              if (!IsNaN(*k) && ((x+u-offset.x) >= 0))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
              pixels+=GetPixelChannels(image);
            }
            break;
          }
          default:
            break;
        }
        if (fabs(pixel-q[i]) > MagickEpsilon)
          changed++;
        q[i]=ClampToQuantum(pixel);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,MorphologyTag,progress,2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  morphology_view=DestroyCacheView(morphology_view);
  image_view=DestroyCacheView(image_view);
  /*
    Do the reverse pass through the image.
  */
  image_view=AcquireVirtualCacheView(image,exception);
  morphology_view=AcquireAuthenticCacheView(image,exception);
  for (y=(ssize_t) image->rows-1; y >= 0; y--)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /*
      Read virtual pixels, and authentic pixels, from the same image.  We
      read using virtual to get virtual pixel handling, but write back into
      the same image.

      Only the bottom half of the kernel is processed as we go up the image.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-offset.x,y,width,(size_t)
      kernel->y+1,exception);
    q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Walk this row right-to-left, so start at the last pixel. */
    p+=(image->columns-1)*GetPixelChannels(image);
    q+=(image->columns-1)*GetPixelChannels(image);
    for (x=(ssize_t) image->columns-1; x >= 0; x--)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelChannel
          channel;

        PixelTrait
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;
        pixels=p;
        pixel=(double) QuantumRange;
        switch (method)
        {
          case DistanceMorphology:
          {
            /* Scan rows of the kernel at and below the origin. */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            for (v=offset.y; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /* Also use the just-written results to the right on this row. */
            k=(&kernel->values[kernel->width*kernel->y+kernel->x-1]);
            pixels=q;
            for (u=offset.x+1; u < (ssize_t) kernel->width; u++)
            {
              pixels+=GetPixelChannels(image);
              if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
            }
            break;
          }
          case VoronoiMorphology:
          {
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            for (v=offset.y; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q;
            for (u=offset.x+1; u < (ssize_t) kernel->width; u++)
            {
              pixels+=GetPixelChannels(image);
              if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
            }
            break;
          }
          default:
            break;
        }
        if (fabs(pixel-q[i]) > MagickEpsilon)
          changed++;
        q[i]=ClampToQuantum(pixel);
      }
      p-=GetPixelChannels(image);
      q-=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,MorphologyTag,progress,2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  morphology_view=DestroyCacheView(morphology_view);
  image_view=DestroyCacheView(image_view);
  return(status ? (ssize_t) changed : -1);
}

/*
  Apply a Morphology by calling one of the above low level primitive
  application functions.  This function handles any iteration loops,
  composition or re-iteration of results, and compound morphology methods
  that are based on multiple low-level (staged) morphology methods.

  Basically this provides the complex glue between the requested morphology
  method and raw low-level implementation (above).
*/
MagickPrivate Image *MorphologyApply(const Image *image,
  const MorphologyMethod method, const ssize_t iterations,
  const KernelInfo *kernel, const CompositeOperator compose,const double bias,
  ExceptionInfo *exception)
{
  CompositeOperator
    curr_compose;

  Image
    *curr_image,    /* Image we are working with or iterating */
    *work_image,    /* secondary image for primitive iteration */
    *save_image,    /* saved image - for 'edge' method only */
    *rslt_image;    /* resultant image - after multi-kernel handling */

  KernelInfo
    *reflected_kernel, /* A reflected copy of the kernel (if needed) */
    *norm_kernel,      /* the current normal un-reflected kernel */
    *rflt_kernel,      /* the current reflected kernel (if needed) */
    *this_kernel;      /* the kernel being applied */

  MorphologyMethod
    primitive;      /* the current morphology primitive being applied */

  CompositeOperator
    rslt_compose;   /* multi-kernel compose method for results to use */

  MagickBooleanType
    special,        /* do we use a direct modify function? */
    verbose;        /* verbose output of results */

  size_t
    method_loop,    /* Loop 1: number of compound method iterations (norm 1) */
    method_limit,   /*         maximum number of compound method iterations */
    kernel_number,  /* Loop 2: the kernel number being applied */
    stage_loop,     /* Loop 3: primitive loop for compound morphology */
    stage_limit,    /*         how many primitives are in this compound */
    kernel_loop,    /* Loop 4: iterate the kernel over image */
    kernel_limit,   /*         number of times to iterate kernel */
    count,          /* total count of primitive steps applied */
    kernel_changed, /* total count of changed using iterated kernel */
    method_changed; /* total count of changed over method iteration */

  ssize_t
    changed;        /* number pixels changed by last primitive operation */

  char
    v_info[MagickPathExtent];

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);

  count = 0;      /* number of low-level morphology primitives performed */
  if ( iterations == 0 )
    return((Image *) NULL);   /* null operation - nothing to do! */

  kernel_limit = (size_t) iterations;
  if ( iterations < 0 )  /* negative iterations = infinite (well almost) */
    kernel_limit = image->columns>image->rows ? image->columns : image->rows;

  verbose = IsStringTrue(GetImageArtifact(image,"debug"));

  /* initialise for cleanup */
  curr_image = (Image *) image;
  curr_compose = image->compose;
  (void) curr_compose;
  work_image = save_image = rslt_image = (Image *) NULL;
  reflected_kernel = (KernelInfo *) NULL;

  /* Initialize specific methods
   * + which loop should use the given iterations
   * + how many primitives make up the compound morphology
   * + multi-kernel compose method to use (by default)
   */
  method_limit = 1;       /* just do method once, unless otherwise set */
  stage_limit = 1;        /* assume method is not a compound */
  special = MagickFalse;  /* assume it is NOT a direct modify primitive */
  rslt_compose = compose; /* and we are composing multi-kernels as given */
  switch( method ) {
    case SmoothMorphology:  /* 4 primitive compound morphology */
      stage_limit = 4;
      break;
    case OpenMorphology:    /* 2 primitive compound morphology */
    case OpenIntensityMorphology:
    case TopHatMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case EdgeMorphology:
      stage_limit = 2;
      break;
    case HitAndMissMorphology:
      rslt_compose = LightenCompositeOp;  /* Union of multi-kernel results */
      /* FALL THRU */
    case ThinningMorphology:
    case ThickenMorphology:
      method_limit = kernel_limit;  /* iterate the whole method */
      kernel_limit = 1;             /* do not do kernel iteration */
      break;
    case DistanceMorphology:
    case VoronoiMorphology:
      special = MagickTrue;         /* use special direct primitive */
      break;
    default:
      break;
  }

  /* Apply special methods with special requirements
  ** For example, single run only, or post-processing requirements
  */
  if ( special != MagickFalse )
    {
      rslt_image=CloneImage(image,0,0,MagickTrue,exception);
      if (rslt_image == (Image *) NULL)
        goto error_cleanup;
      if (SetImageStorageClass(rslt_image,DirectClass,exception) == MagickFalse)
        goto error_cleanup;
      changed=MorphologyPrimitiveDirect(rslt_image,method,kernel,exception);

      if (verbose != MagickFalse)
        (void) (void) FormatLocaleFile(stderr,
          "%s:%.20g.%.20g #%.20g => Changed %.20g\n",
          CommandOptionToMnemonic(MagickMorphologyOptions, method),
          1.0,0.0,1.0, (double) changed);

      if ( changed < 0 )
        goto error_cleanup;

      if ( method == VoronoiMorphology ) {
        /* Preserve the alpha channel of input image - but turned it off */
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
          exception);
        (void) CompositeImage(rslt_image,image,CopyAlphaCompositeOp,
          MagickTrue,0,0,exception);
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
          exception);
      }
      goto exit_cleanup;
    }

  /* Handle user (caller) specified multi-kernel composition method */
  if ( compose != UndefinedCompositeOp )
    rslt_compose = compose;  /* override default composition for method */
  if ( rslt_compose == UndefinedCompositeOp )
    rslt_compose = NoCompositeOp; /* still not defined! Then re-iterate */

  /* Some methods require a reflected kernel to use with primitives.
   * Create the reflected kernel for those methods.
   */
  switch ( method ) {
    case CorrelateMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case SmoothMorphology:
      reflected_kernel = CloneKernelInfo(kernel);
      if (reflected_kernel == (KernelInfo *) NULL)
        goto error_cleanup;
      RotateKernelInfo(reflected_kernel,180);
      break;
    default:
      break;
  }

  /* Loops around more primitive morphology methods
  **  erode, dilate, open, close, smooth, edge, etc...
  */
  /* Loop 1:  iterate the compound method */
  method_loop = 0;
  method_changed = 1;
  while ( method_loop < method_limit &&
          method_changed > 0 ) {
    method_loop++;
    method_changed = 0;

    /* Loop 2:  iterate over each kernel in a multi-kernel list */
    norm_kernel = (KernelInfo *) kernel;
    this_kernel = (KernelInfo *) kernel;
    rflt_kernel = reflected_kernel;

    kernel_number = 0;
    while ( norm_kernel != NULL ) {

      /* Loop 3: Compound Morphology Staging - Select Primitive to apply */
      stage_loop = 0;          /* the compound morphology stage number */
      while ( stage_loop < stage_limit ) {
        stage_loop++;   /* The stage of the compound morphology */

        /* Select primitive morphology for this stage of compound method */
        this_kernel = norm_kernel; /* default use unreflected kernel */
        primitive = method;        /* Assume method is a primitive */
        switch( method ) {
          case ErodeMorphology:      /* just erode */
          case EdgeInMorphology:     /* erode and image difference */
            primitive = ErodeMorphology;
            break;
          case DilateMorphology:     /* just dilate */
          case EdgeOutMorphology:    /* dilate and image difference */
            primitive = DilateMorphology;
            break;
          case OpenMorphology:       /* erode then dilate */
          case TopHatMorphology:     /* open and image difference */
            primitive = ErodeMorphology;
            if ( stage_loop == 2 )
              primitive = DilateMorphology;
            break;
          case OpenIntensityMorphology:
            primitive = ErodeIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = DilateIntensityMorphology;
            break;
          case CloseMorphology:      /* dilate, then erode */
          case BottomHatMorphology:  /* close and image difference */
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = DilateMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeMorphology;
            break;
          case CloseIntensityMorphology:
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = DilateIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeIntensityMorphology;
            break;
          case SmoothMorphology:         /* open, close */
            switch ( stage_loop ) {
              case 1: /* start an open method, which starts with Erode */
                primitive = ErodeMorphology;
                break;
              case 2:  /* now Dilate the Erode */
                primitive = DilateMorphology;
                break;
              case 3:  /* Reflect kernel a close */
                this_kernel = rflt_kernel; /* use the reflected kernel */
                primitive = DilateMorphology;
                break;
              case 4:  /* Finish the Close */
                this_kernel = rflt_kernel; /* use the reflected kernel */
                primitive = ErodeMorphology;
                break;
            }
            break;
          case EdgeMorphology:        /* dilate and erode difference */
            primitive = DilateMorphology;
            if ( stage_loop == 2 ) {
              save_image = curr_image;      /* save the image difference */
              curr_image = (Image *) image;
              primitive = ErodeMorphology;
            }
            break;
          case CorrelateMorphology:
            /* A Correlation is a Convolution with a reflected kernel.
            ** However a Convolution is a weighted sum using a reflected
            ** kernel.  It may seem strange to convert a Correlation into a
            ** Convolution as the Correlation is the simpler method, but
            ** Convolution is much more commonly used, and it makes sense to
            ** implement it directly so as to avoid the need to duplicate
            ** the kernel when it is not required (which is typically the
            ** default).
            */
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = ConvolveMorphology;
            break;
          default:
            break;
        }
        assert( this_kernel != (KernelInfo *) NULL );

        /* Extra information for debugging compound operations */
        if (verbose != MagickFalse) {
          if ( stage_limit > 1 )
            (void) FormatLocaleString(v_info,MagickPathExtent,
              "%s:%.20g.%.20g -> ",
              CommandOptionToMnemonic(MagickMorphologyOptions,method),(double)
              method_loop,(double) stage_loop);
          else if ( primitive != method )
            (void) FormatLocaleString(v_info, MagickPathExtent, "%s:%.20g -> ",
              CommandOptionToMnemonic(MagickMorphologyOptions, method),(double)
              method_loop);
          else
            v_info[0] = '\0';
        }

        /* Loop 4: Iterate the kernel with primitive */
        kernel_loop = 0;
        kernel_changed = 0;
        changed = 1;
        while ( kernel_loop < kernel_limit && changed > 0 ) {
          kernel_loop++;     /* the iteration of this kernel */

          /* Create a clone as the destination image, if not yet defined */
          if ( work_image == (Image *) NULL )
            {
              work_image=CloneImage(image,0,0,MagickTrue,exception);
              if (work_image == (Image *) NULL)
                goto error_cleanup;
              if (SetImageStorageClass(work_image,DirectClass,exception) == MagickFalse)
                goto error_cleanup;
            }

          /* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
          count++;
          changed = MorphologyPrimitive(curr_image, work_image, primitive,
            this_kernel, bias, exception);
          if (verbose != MagickFalse) {
            if ( kernel_loop > 1 )
              (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line from previous */
            (void) (void) FormatLocaleFile(stderr,
              "%s%s%s:%.20g.%.20g #%.20g => Changed %.20g",
              v_info,CommandOptionToMnemonic(MagickMorphologyOptions,
              primitive),(this_kernel == rflt_kernel ) ? "*" : "",
              (double) (method_loop+kernel_loop-1),(double) kernel_number,
              (double) count,(double) changed);
          }
          if ( changed < 0 )
            goto error_cleanup;
          kernel_changed += changed;
          method_changed += changed;

          /* prepare next loop */
          { Image *tmp = work_image;   /* swap images for iteration */
            work_image = curr_image;
            curr_image = tmp;
          }
          if ( work_image == image )
            work_image = (Image *) NULL; /* replace input 'image' */

        } /* End Loop 4: Iterate the kernel with primitive */

        if (verbose != MagickFalse && kernel_changed != (size_t)changed)
          (void) FormatLocaleFile(stderr, " Total %.20g",(double) kernel_changed);
        if (verbose != MagickFalse && stage_loop < stage_limit)
          (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line before looping */

#if 0
    (void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image);
    (void) FormatLocaleFile(stderr, " curr =0x%lx\n", (unsigned long)curr_image);
    (void) FormatLocaleFile(stderr, " work =0x%lx\n", (unsigned long)work_image);
    (void) FormatLocaleFile(stderr, " save =0x%lx\n", (unsigned long)save_image);
    (void) FormatLocaleFile(stderr, " union=0x%lx\n", (unsigned long)rslt_image);
#endif

      } /* End Loop 3: Primitive (staging) Loop for Compound Methods */

      /* Final Post-processing for some Compound Methods
      **
      ** The removal of any 'Sync' channel flag in the Image Composition
      ** below ensures the mathematical compose method is applied in a
      ** purely mathematical way, and only to the selected channels.
      ** Turn off SVG composition 'alpha blending'.
      */
      switch( method ) {
        case EdgeOutMorphology:
        case EdgeInMorphology:
        case TopHatMorphology:
        case BottomHatMorphology:
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr,
              "\n%s: Difference with original image",CommandOptionToMnemonic(
              MagickMorphologyOptions, method) );
          (void) CompositeImage(curr_image,image,DifferenceCompositeOp,
            MagickTrue,0,0,exception);
          break;
        case EdgeMorphology:
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr,
              "\n%s: Difference of Dilate and Erode",CommandOptionToMnemonic(
              MagickMorphologyOptions, method) );
          (void) CompositeImage(curr_image,save_image,DifferenceCompositeOp,
            MagickTrue,0,0,exception);
          save_image = DestroyImage(save_image); /* finished with save image */
          break;
        default:
          break;
      }

      /* multi-kernel handling:  re-iterate, or compose results */
      if ( kernel->next == (KernelInfo *) NULL )
        rslt_image = curr_image;   /* just return the resulting image */
      else if ( rslt_compose == NoCompositeOp )
        { if (verbose != MagickFalse) {
            if ( this_kernel->next != (KernelInfo *) NULL )
              (void) FormatLocaleFile(stderr, " (re-iterate)");
            else
              (void) FormatLocaleFile(stderr, " (done)");
          }
          rslt_image = curr_image; /* return result, and re-iterate */
        }
      else if ( rslt_image == (Image *) NULL)
        { if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr, " (save for compose)");
          rslt_image = curr_image;
          curr_image = (Image *) image;  /* continue with original image */
        }
      else
        { /* Add the new 'current' result to the composition
          **
          ** The removal of any 'Sync' channel flag in the Image Composition
          ** below ensures the mathematical compose method is applied in a
          ** purely mathematical way, and only to the selected channels.
          ** IE: Turn off SVG composition 'alpha blending'.
          */
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr, " (compose \"%s\")",
              CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) );
          (void) CompositeImage(rslt_image,curr_image,rslt_compose,MagickTrue,
            0,0,exception);
          curr_image = DestroyImage(curr_image);
          curr_image = (Image *) image;  /* continue with original image */
        }
      if (verbose != MagickFalse)
        (void) FormatLocaleFile(stderr, "\n");

      /* loop to the next kernel in a multi-kernel list */
      norm_kernel = norm_kernel->next;
      if ( rflt_kernel != (KernelInfo *) NULL )
        rflt_kernel = rflt_kernel->next;
      kernel_number++;
    } /* End Loop 2: Loop over each kernel */

  } /* End Loop 1: compound method iteration */

  goto exit_cleanup;

  /* Yes goto's are bad, but it makes cleanup lot more efficient */
error_cleanup:
  if ( curr_image == rslt_image )
    curr_image = (Image *) NULL;
  if ( rslt_image != (Image *) NULL )
    rslt_image = DestroyImage(rslt_image);
exit_cleanup:
  if ( curr_image == rslt_image || curr_image == image )
    curr_image = (Image *) NULL;
  if ( curr_image != (Image *) NULL )
    curr_image = DestroyImage(curr_image);
  if ( work_image != (Image *) NULL )
    work_image = DestroyImage(work_image);
  if ( save_image != (Image *) NULL )
    save_image = DestroyImage(save_image);
  if ( reflected_kernel != (KernelInfo *) NULL )
    reflected_kernel = DestroyKernelInfo(reflected_kernel);
  return(rslt_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o r p h o l o g y I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MorphologyImage() applies a user supplied kernel to the image according to
%  the given morphology method.
%
%  This function applies any and all user defined settings before calling
%  the above internal function MorphologyApply().
%
%  User defined settings include...
% * Output Bias for Convolution and correlation ("-define convolve:bias=??")
% * Kernel Scale/normalize settings ("-define convolve:scale=??")
%   This can also include the addition of a scaled unity kernel.
% * Show Kernel being applied ("-define morphology:showKernel=1")
%
% Other operators that do not want user supplied options interfering,
% especially "convolve:bias" and "morphology:showKernel" should use
% MorphologyApply() directly.
%
% The format of the MorphologyImage method is:
%
%     Image *MorphologyImage(const Image *image,MorphologyMethod method,
%       const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image: the image.
%
%   o method: the morphology method to be applied.
%
%   o iterations: apply the operation this many times (or no change).
%       A value of -1 means loop until no change found.
%       How this is applied may depend on the morphology method.
%       Typically this is a value of 1.
%
%   o kernel: An array of double representing the morphology kernel.
%       Warning: kernel may be normalized for the Convolve method.
%
%   o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphologyImage(const Image *image,
  const MorphologyMethod method,const ssize_t iterations,
  const KernelInfo *kernel,ExceptionInfo *exception)
{
  const char
    *artifact;

  CompositeOperator
    compose;

  double
    bias;

  Image
    *morphology_image;

  KernelInfo
    *curr_kernel;

  /* Start with the caller's kernel; only clone it if a user setting
  ** below needs to modify it (so we know what to destroy on exit). */
  curr_kernel = (KernelInfo *) kernel;
  bias=0.0;
  compose = UndefinedCompositeOp;  /* use default for method */

  /* Apply Convolve/Correlate Normalization and Scaling Factors.
   * This is done BEFORE the ShowKernelInfo() function is called so that
   * users can see the results of the 'option:convolve:scale' option.
   */
  if ( method == ConvolveMorphology || method == CorrelateMorphology )
    {
      /* Get the bias value as it will be needed */
      artifact = GetImageArtifact(image,"convolve:bias");
      if ( artifact != (const char *) NULL)
        {
          if (IsGeometry(artifact) == MagickFalse)
            (void) ThrowMagickException(exception,GetMagickModule(),
                 OptionWarning,"InvalidSetting","'%s' '%s'",
                 "convolve:bias",artifact);
          else
            /* bias is expressed as a fraction of the quantum range */
            bias=StringToDoubleInterval(artifact,(double) QuantumRange+1.0);
        }

      /* Scale kernel according to user wishes */
      artifact = GetImageArtifact(image,"convolve:scale");
      if ( artifact != (const char *) NULL )
        {
          if (IsGeometry(artifact) == MagickFalse)
            (void) ThrowMagickException(exception,GetMagickModule(),
                 OptionWarning,"InvalidSetting","'%s' '%s'",
                 "convolve:scale",artifact);
          else
            {
              /* clone before scaling so the caller's kernel is untouched */
              if ( curr_kernel == kernel )
                curr_kernel = CloneKernelInfo(kernel);
              if (curr_kernel == (KernelInfo *) NULL)
                return((Image *) NULL);
              ScaleGeometryKernelInfo(curr_kernel, artifact);
            }
        }
    }

  /* display the (normalized) kernel via stderr */
  artifact=GetImageArtifact(image,"morphology:showKernel");
  if (IsStringTrue(artifact) != MagickFalse)
    ShowKernelInfo(curr_kernel);

  /* Override the default handling of multi-kernel morphology results
   * If 'Undefined' use the default method
   * If 'None' (default for 'Convolve') re-iterate previous result
   * Otherwise merge resulting images using compose method given.
   * Default for 'HitAndMiss' is 'Lighten'.
   */
  { ssize_t
      parse;

    artifact = GetImageArtifact(image,"morphology:compose");
    if ( artifact != (const char *) NULL)
      {
        parse=ParseCommandOption(MagickComposeOptions,
          MagickFalse,artifact);
        if ( parse < 0 )
          (void) ThrowMagickException(exception,GetMagickModule(),
               OptionWarning,"UnrecognizedComposeOperator","'%s' '%s'",
               "morphology:compose",artifact);
        else
          compose=(CompositeOperator)parse;
      }
  }

  /* Apply the Morphology */
  morphology_image = MorphologyApply(image,method,iterations,
    curr_kernel,compose,bias,exception);

  /* Cleanup and Exit: destroy the kernel only if we cloned it above */
  if ( curr_kernel != kernel )
    curr_kernel=DestroyKernelInfo(curr_kernel);
  return(morphology_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%     R o t a t e K e r n e l I n f o                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RotateKernelInfo() rotates the kernel by the angle given.
%
%  Currently it is restricted to 90 degree angles, of either 1D kernels
%  or square kernels. And 'circular' rotations of 45 degrees for 3x3 kernels.
%  It will ignore useless rotations for specific 'named' built-in kernels.
%
%  The format of the RotateKernelInfo method is:
%
%       void RotateKernelInfo(KernelInfo *kernel, double angle)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
%    o angle: angle to rotate in degrees
%
% This function is currently internal to this module only, but can be exported
% to other modules if needed.
*/
static void RotateKernelInfo(KernelInfo *kernel, double angle)
{
  /* angle the lower kernels first */
  if ( kernel->next != (KernelInfo *) NULL)
    RotateKernelInfo(kernel->next, angle);

  /* WARNING: Currently assumes the kernel (rightly) is horizontally symmetrical
  **
  ** TODO: expand beyond simple 90 degree rotates, flips and flops
  */

  /* Modulus the angle into [0, 360) */
  angle = fmod(angle, 360.0);
  if ( angle < 0 )
    angle += 360.0;

  if ( 337.5 < angle || angle <= 22.5 )
    return;   /* Near zero angle - no change! - At least not at this time */

  /* Handle special cases */
  switch (kernel->type) {
    /* These built-in kernels are cylindrical kernels, rotating is useless */
    case GaussianKernel:
    case DoGKernel:
    case LoGKernel:
    case DiskKernel:
    case PeaksKernel:
    case LaplacianKernel:
    case ChebyshevKernel:
    case ManhattanKernel:
    case EuclideanKernel:
      return;

    /* These may be rotatable at non-90 angles in the future */
    /* but simply rotating them in multiples of 90 degrees is useless */
    case SquareKernel:
    case DiamondKernel:
    case PlusKernel:
    case CrossKernel:
      return;

    /* These only allow a +/-90 degree rotation (by transpose) */
    /* A 180 degree rotation is useless */
    case BlurKernel:
      if ( 135.0 < angle && angle <= 225.0 )
        return;
      if ( 225.0 < angle && angle <= 315.0 )
        angle -= 180;
      break;

    default:
      break;
  }

  /* Attempt rotations by 45 degrees -- 3x3 kernels only */
  if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 )
    {
      if ( kernel->width == 3 && kernel->height == 3 )
        {
          /* Rotate a 3x3 square by 45 degree angle: cycle the eight
          ** outer cells one position; the center cell is untouched. */
          double t  = kernel->values[0];
          kernel->values[0] = kernel->values[3];
          kernel->values[3] = kernel->values[6];
          kernel->values[6] = kernel->values[7];
          kernel->values[7] = kernel->values[8];
          kernel->values[8] = kernel->values[5];
          kernel->values[5] = kernel->values[2];
          kernel->values[2] = kernel->values[1];
          kernel->values[1] = t;
          /* rotate non-centered origin */
          if ( kernel->x != 1 || kernel->y != 1 ) {
            ssize_t x,y;
            x = (ssize_t) kernel->x-1;
            y = (ssize_t) kernel->y-1;
                 if ( x == y  ) x = 0;
            else if ( x == 0  ) x = -y;
            else if ( x == -y ) y = 0;
            else if ( y == 0  ) y = x;
            kernel->x = (ssize_t) x+1;
            kernel->y = (ssize_t) y+1;
          }
          angle = fmod(angle+315.0, 360.0);  /* angle reduced 45 degrees */
          kernel->angle = fmod(kernel->angle+45.0, 360.0);
        }
      else
        perror("Unable to rotate non-3x3 kernel by 45 degrees");
    }

  if ( 45.0 < fmod(angle, 180.0) && fmod(angle,180.0) <= 135.0 )
    {
      if ( kernel->width == 1 || kernel->height == 1 )
        {
          /* Do a transpose of a 1 dimensional kernel,
          ** which results in a fast 90 degree rotation of some type. */
          ssize_t
            t;
          t = (ssize_t) kernel->width;
          kernel->width = kernel->height;
          kernel->height = (size_t) t;
          t = kernel->x;
          kernel->x = kernel->y;
          kernel->y = t;
          if ( kernel->width == 1 ) {
            angle = fmod(angle+270.0, 360.0);     /* angle reduced 90 degrees */
            kernel->angle = fmod(kernel->angle+90.0, 360.0);
          } else {
            angle = fmod(angle+90.0, 360.0);   /* angle increased 90 degrees */
            kernel->angle = fmod(kernel->angle+270.0, 360.0);
          }
        }
      else if ( kernel->width == kernel->height )
        {
          /* Rotate a square array of values by 90 degrees:
          ** four-way cyclic exchange working inward ring by ring. */
          { register ssize_t
              i,j,x,y;

            register MagickRealType
              *k,t;

            k=kernel->values;
            for( i=0, x=(ssize_t) kernel->width-1;  i<=x;   i++, x--)
              for( j=0, y=(ssize_t) kernel->height-1;  j<y;   j++, y--)
                { t                    = k[i+j*kernel->width];
                  k[i+j*kernel->width] = k[j+x*kernel->width];
                  k[j+x*kernel->width] = k[x+y*kernel->width];
                  k[x+y*kernel->width] = k[y+i*kernel->width];
                  k[y+i*kernel->width] = t;
                }
          }
          /* rotate the origin - relative to center of array */
          { register ssize_t x,y;
            x = (ssize_t) (kernel->x*2-kernel->width+1);
            y = (ssize_t) (kernel->y*2-kernel->height+1);
            kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
            kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
          }
          angle = fmod(angle+270.0, 360.0);     /* angle reduced 90 degrees */
          kernel->angle = fmod(kernel->angle+90.0, 360.0);
        }
      else
        perror("Unable to rotate a non-square, non-linear kernel 90 degrees");
    }

  if ( 135.0 < angle && angle <= 225.0 )
    {
      /* For a 180 degree rotation - also known as a reflection.
       * This is actually a very very common operation!
       * Basically all that is needed is a reversal of the kernel data!
       * And a reflection of the origin.
       */
      MagickRealType
        t;

      register MagickRealType
        *k;

      ssize_t
        i, j;

      k=kernel->values;
      j=(ssize_t) (kernel->width*kernel->height-1);
      for (i=0;  i < j;  i++, j--)
        t=k[i],  k[i]=k[j],  k[j]=t;

      kernel->x = (ssize_t) kernel->width  - kernel->x - 1;
      kernel->y = (ssize_t) kernel->height - kernel->y - 1;
      angle = fmod(angle-180.0, 360.0);   /* angle+180 degrees */
      kernel->angle = fmod(kernel->angle+180.0, 360.0);
    }

  /* At this point angle should at least be between -45 (315) and +45 degrees
   * In the future some form of non-orthogonal angled rotates could be
   * performed here, possibly with a linear kernel restriction.
   */
  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%     S c a l e G e o m e t r y K e r n e l I n f o                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ScaleGeometryKernelInfo() takes a geometry argument string, typically
%  provided as a "-set option:convolve:scale {geometry}" user setting,
%  and modifies the kernel according to the parsed arguments of that setting.
%
%  The first argument (and any normalization flags) are passed to
%  ScaleKernelInfo() to scale/normalize the kernel.  The second argument
%  is then passed to UnityAddKernelInfo() to add a scaled unity kernel
%  into the scaled/normalized kernel.
%
%  The format of the ScaleGeometryKernelInfo method is:
%
%      void ScaleGeometryKernelInfo(KernelInfo *kernel,
%        const double scaling_factor,const MagickStatusType normalize_flags)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel to modify
%
%    o geometry:
%        The geometry string to parse, typically from the user provided
%        "-set option:convolve:scale {geometry}" setting.
% */
MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel,
     const char *geometry)
{
  GeometryInfo
    geometry_info;

  MagickStatusType
    flags;

  /* Parse the user supplied geometry string into rho/sigma arguments. */
  SetGeometryInfo(&geometry_info);
  flags = ParseGeometry(geometry, &geometry_info);

  /* A '%' flag makes both arguments percentages of their usual value. */
  if ( (flags & PercentValue) != 0 )
    {
      geometry_info.rho *= 0.01;
      geometry_info.sigma *= 0.01;
    }

  /* Supply defaults for any argument the user did not give. */
  if ( (flags & RhoValue) == 0 )
    geometry_info.rho = 1.0;
  if ( (flags & SigmaValue) == 0 )
    geometry_info.sigma = 0.0;

  /* First argument (plus '!'/'^' normalization flags):
  ** Scale/Normalize the input kernel. */
  ScaleKernelInfo(kernel, geometry_info.rho, (GeometryFlags) flags);

  /* Second argument: blend in a scaled unity kernel, mixing the
  ** original image into the convolution result. */
  if ( (flags & SigmaValue) != 0 )
    UnityAddKernelInfo(kernel, geometry_info.sigma);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%     S c a l e K e r n e l I n f o                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ScaleKernelInfo() scales the given kernel list by the given amount, with or
%  without normalization of the sum of the kernel values (as per given flags).
%
%  By default (no flags given) the values within the kernel is scaled
%  directly using given scaling factor without change.
%
%  If either of the two 'normalize_flags' are given the kernel will first be
%  normalized and then further scaled by the scaling factor value given.
%
%  Kernel normalization ('normalize_flags' given) is designed to ensure that
%  any use of the kernel scaling factor with 'Convolve' or 'Correlate'
%  morphology methods will fall into -1.0 to +1.0 range.  Note that for
%  non-HDRI versions of IM this may cause images to have any negative results
%  clipped, unless some 'bias' is used.
%
%  More specifically.
 Kernels which only contain positive values (such as a
%  'Gaussian' kernel) will be scaled so that those values sum to +1.0,
%  ensuring a 0.0 to +1.0 output range for non-HDRI images.
%
%  For Kernels that contain some negative values, (such as 'Sharpen' kernels)
%  the kernel will be scaled by the absolute of the sum of kernel values, so
%  that it will generally fall within the +/- 1.0 range.
%
%  For kernels whose values sum to zero, (such as 'Laplacian' kernels) kernel
%  will be scaled by just the sum of the positive values, so that its output
%  range will again fall into the +/- 1.0 range.
%
%  For special kernels designed for locating shapes using 'Correlate', (often
%  only containing +1 and -1 values, representing foreground/background
%  matching) a special normalization method is provided to scale the positive
%  values separately to those of the negative values, so the kernel will be
%  forced to become a zero-sum kernel better suited to such searches.
%
%  WARNING: Correct normalization of the kernel assumes that the '*_range'
%  attributes within the kernel structure have been correctly set during the
%  kernels creation.
%
%  NOTE: The values used for 'normalize_flags' have been selected specifically
%  to match the use of geometry options, so that '!' means NormalizeValue, '^'
%  means CorrelateNormalizeValue.  All other GeometryFlags values are ignored.
%
%  The format of the ScaleKernelInfo method is:
%
%      void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
%               const MagickStatusType normalize_flags )
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
%    o scaling_factor:
%             multiply all values (after normalization) by this factor if not
%             zero.  The kernel is normalized regardless of any flags.
%
%    o normalize_flags:
%             GeometryFlags defining normalization method to use.
% specifically: NormalizeValue, CorrelateNormalizeValue, % and/or PercentValue % */ MagickExport void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,const GeometryFlags normalize_flags) { register double pos_scale, neg_scale; register ssize_t i; /* do the other kernels in a multi-kernel list first */ if ( kernel->next != (KernelInfo *) NULL) ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags); /* Normalization of Kernel */ pos_scale = 1.0; if ( (normalize_flags&NormalizeValue) != 0 ) { if ( fabs(kernel->positive_range + kernel->negative_range) >= MagickEpsilon ) /* non-zero-summing kernel (generally positive) */ pos_scale = fabs(kernel->positive_range + kernel->negative_range); else /* zero-summing kernel */ pos_scale = kernel->positive_range; } /* Force kernel into a normalized zero-summing kernel */ if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) { pos_scale = ( fabs(kernel->positive_range) >= MagickEpsilon ) ? kernel->positive_range : 1.0; neg_scale = ( fabs(kernel->negative_range) >= MagickEpsilon ) ? -kernel->negative_range : 1.0; } else neg_scale = pos_scale; /* finialize scaling_factor for positive and negative components */ pos_scale = scaling_factor/pos_scale; neg_scale = scaling_factor/neg_scale; for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++) if (!IsNaN(kernel->values[i])) kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale; /* convolution output range */ kernel->positive_range *= pos_scale; kernel->negative_range *= neg_scale; /* maximum and minimum values in kernel */ kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale; kernel->minimum *= (kernel->minimum >= 0.0) ? 
pos_scale : neg_scale; /* swap kernel settings if user's scaling factor is negative */ if ( scaling_factor < MagickEpsilon ) { double t; t = kernel->positive_range; kernel->positive_range = kernel->negative_range; kernel->negative_range = t; t = kernel->maximum; kernel->maximum = kernel->minimum; kernel->minimum = 1; } return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h o w K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShowKernelInfo() outputs the details of the given kernel defination to % standard error, generally due to a users 'morphology:showKernel' option % request. % % The format of the ShowKernel method is: % % void ShowKernelInfo(const KernelInfo *kernel) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % */ MagickPrivate void ShowKernelInfo(const KernelInfo *kernel) { const KernelInfo *k; size_t c, i, u, v; for (c=0, k=kernel; k != (KernelInfo *) NULL; c++, k=k->next ) { (void) FormatLocaleFile(stderr, "Kernel"); if ( kernel->next != (KernelInfo *) NULL ) (void) FormatLocaleFile(stderr, " #%lu", (unsigned long) c ); (void) FormatLocaleFile(stderr, " \"%s", CommandOptionToMnemonic(MagickKernelOptions, k->type) ); if ( fabs(k->angle) >= MagickEpsilon ) (void) FormatLocaleFile(stderr, "@%lg", k->angle); (void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long) k->width,(unsigned long) k->height,(long) k->x,(long) k->y); (void) FormatLocaleFile(stderr, " with values from %.*lg to %.*lg\n", GetMagickPrecision(), k->minimum, GetMagickPrecision(), k->maximum); (void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg", GetMagickPrecision(), k->negative_range, GetMagickPrecision(), k->positive_range); if ( fabs(k->positive_range+k->negative_range) < MagickEpsilon ) (void) FormatLocaleFile(stderr, " (Zero-Summing)\n"); else if ( 
fabs(k->positive_range+k->negative_range-1.0) < MagickEpsilon ) (void) FormatLocaleFile(stderr, " (Normalized)\n"); else (void) FormatLocaleFile(stderr, " (Sum %.*lg)\n", GetMagickPrecision(), k->positive_range+k->negative_range); for (i=v=0; v < k->height; v++) { (void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) v ); for (u=0; u < k->width; u++, i++) if (IsNaN(k->values[i])) (void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan"); else (void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3, GetMagickPrecision(), (double) k->values[i]); (void) FormatLocaleFile(stderr,"\n"); } } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n i t y A d d K e r n a l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnityAddKernelInfo() Adds a given amount of the 'Unity' Convolution Kernel % to the given pre-scaled and normalized Kernel. This in effect adds that % amount of the original image into the resulting convolution kernel. This % value is usually provided by the user as a percentage value in the % 'convolve:scale' setting. % % The resulting effect is to convert the defined kernels into blended % soft-blurs, unsharp kernels or into sharpening kernels. % % The format of the UnityAdditionKernelInfo method is: % % void UnityAdditionKernelInfo(KernelInfo *kernel, const double scale ) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % % o scale: % scaling factor for the unity kernel to be added to % the given kernel. 
% */
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
  const double scale)
{
  KernelInfo
    *k;

  /* Blend a scaled unity (identity) kernel into every kernel of the
  ** multi-kernel list: bump the origin cell and refresh the cached
  ** range/min/max meta-data that the bump invalidated. */
  for (k=kernel; k != (KernelInfo *) NULL; k=k->next)
    {
      k->values[k->x+k->y*k->width] += scale;
      CalcKernelMetaData(k);  /* recalculate the meta-data */
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%     Z e r o K e r n e l N a n s                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZeroKernelNans() replaces any special 'nan' value that may be present in
%  the kernel with a zero value.  This is typically done when the kernel will
%  be used in special hardware (GPU) convolution processors, to simplify
%  matters.
%
%  The format of the ZeroKernelNans method is:
%
%      void ZeroKernelNans (KernelInfo *kernel)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
*/
MagickPrivate void ZeroKernelNans(KernelInfo *kernel)
{
  KernelInfo
    *k;

  register size_t
    n;

  /* Walk the whole multi-kernel list and zero every NaN cell. */
  for (k=kernel; k != (KernelInfo *) NULL; k=k->next)
    for (n=0; n < (k->width*k->height); n++)
      if (IsNaN(k->values[n]))
        k->values[n]=0.0;
}
GB_unaryop__lnot_int16_int8.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_int16_int8
// op(A') function:  GB_tran__lnot_int16_int8

// C type:   int16_t
// A type:   int8_t
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    int8_t

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: logical NOT (z = true iff x is zero)
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting from the A type (int8_t) to the C type (int16_t)
#define GB_CASTING(z, x) \
    int16_t z = (int16_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT16 || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the operator element-wise over the anz entries of Ax, writing the
// result into Cx; the loop is embarrassingly parallel (one entry per index).
GrB_Info GB_unop__lnot_int16_int8
(
    int16_t *restrict Cx,
    const int8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose body lives in GB_unaryop_transpose.c, which is specialized by
// the GB_* macros defined above (phase 2 of the two-phase transpose).
GrB_Info GB_tran__lnot_int16_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
bml_adjungate_triangle_dense_typed.c
#include "../../macros.h"
#include "../../typed.h"
#include "../bml_adjungate_triangle.h"
#include "../bml_allocate.h"
#include "../bml_logger.h"
#include "../bml_types.h"
#include "bml_adjungate_triangle_dense.h"
#include "bml_allocate_dense.h"
#include "bml_types_dense.h"

#include <complex.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>

#ifdef _OPENMP
#include <omp.h>
#endif

/** Adjungates a triangle of a matrix in place.
 *
 * Copies the conjugate of one strict triangle of A onto the opposite
 * strict triangle; the diagonal is left untouched.
 *
 * \ingroup adjungate_triangle_group
 *
 * \param A[in,out] The matrix for which the triangle should be adjungated
 * \param triangle[in] Which triangle to adjungate ('u': upper, 'l': lower)
 */
void TYPED_FUNC(
    bml_adjungate_triangle_dense) (
    bml_matrix_dense_t * A,
    char *triangle)
{
    int N = A->N;
    REAL_T *A_matrix = A->matrix;
    const char which = *triangle;

    if (which == 'u')
    {
        /* conjugate-transpose the strict upper triangle into the lower */
#pragma omp parallel for shared(N, A_matrix)
        for (int row = 0; row < N - 1; row++)
        {
            for (int col = row + 1; col < N; col++)
            {
                A_matrix[ROWMAJOR(col, row, N, N)] =
                    conj(A_matrix[ROWMAJOR(row, col, N, N)]);
            }
        }
    }
    else if (which == 'l')
    {
        /* conjugate-transpose the strict lower triangle into the upper */
#pragma omp parallel for shared(N, A_matrix)
        for (int row = 0; row < N - 1; row++)
        {
            for (int col = row + 1; col < N; col++)
            {
                A_matrix[ROWMAJOR(row, col, N, N)] =
                    conj(A_matrix[ROWMAJOR(col, row, N, N)]);
            }
        }
    }
    else
    {
        LOG_ERROR("Unknown triangle type in bml_adjungate\n");
    }
}
GB_binop__pow_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__pow_int16) // A.*B function (eWiseMult): GB (_AemultB_08__pow_int16) // A.*B function (eWiseMult): GB (_AemultB_02__pow_int16) // A.*B function (eWiseMult): GB (_AemultB_04__pow_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_int16) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__pow_int16) // C+=b function (dense accum): GB (_Cdense_accumb__pow_int16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_int16) // C=scalar+B GB (_bind1st__pow_int16) // C=scalar+B' GB (_bind1st_tran__pow_int16) // C=A+scalar GB (_bind2nd__pow_int16) // C=A'+scalar GB (_bind2nd_tran__pow_int16) // C type: int16_t // A type: int16_t // A pattern? 0 // B type: int16_t // B pattern? 
0 // BinaryOp: cij = GB_pow_int16 (aij, bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_pow_int16 (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_POW || GxB_NO_INT16 || GxB_NO_POW_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__pow_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__pow_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__pow_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = 
(int16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// Disabled at generation time (#if 0): pow has no row-scale kernel, so the
// generator emitted the placeholder name GB ((none)) and compiled it out.
#if 0
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx is the output value array; the template does the actual row scaling.
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

// Element-wise add (set union of patterns) for z = pow(x,y) on int16.
// The generic driver lives in GB_add_template.c; this wrapper only unpacks
// the optional eWiseUnion scalars and declares the workspace the template
// expects (M/A/B ek_slicing, freed by GB_FREE_WORKSPACE).
GrB_Info GB (_AaddB__pow_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read by the template when is_eWiseUnion is true,
    // so they are deliberately left uninitialized otherwise.
    int16_t alpha_scalar ;
    int16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

// Element-wise multiply (pattern intersection), method 08: C sparse/hyper.
GrB_Info GB (_AemultB_08__pow_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

// Method 02: A sparse/hyper, B bitmap/full.  GB_FLIPPED selects whether the
// template evaluates fmult(x,y) or fmult(y,x); it must be (re)defined before
// each template inclusion because the template reads it at compile time.
GrB_Info GB (_AemultB_02__pow_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

// Method 04: sparse mask M drives the computation over bitmap/full A and B.
GrB_Info GB (_AemultB_04__pow_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

// Bitmap-result variant of element-wise multiply.
GrB_Info GB (_AemultB_bitmap__pow_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = pow (x, Bx [p]) for every entry present in B's bitmap Bb
// (GBB returns true when entry p exists).  Entries are independent, so the
// loop is a straightforward static-schedule parallel for.
GrB_Info GB (_bind1st__pow_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_pow_int16 (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Mirror of bind1st: Cx [p] = pow (Ax [p], y) for entries present in Ab.
GrB_Info GB (_bind2nd__pow_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_pow_int16 (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_pow_int16 (x, aij) ; \
}

GrB_Info GB (_bind1st_tran__pow_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code included after this function
    #undef GB_ATYPE
    #define GB_ATYPE \
    int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    int16_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_pow_int16 (aij, y) ; \
}

GrB_Info GB (_bind2nd_tran__pow_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
builder.h
// Copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details

#ifndef BUILDER_H_
#define BUILDER_H_

#include <algorithm>
#include <cinttypes>
#include <fstream>
#include <functional>
#include <type_traits>
#include <utility>

#include "command_line.h"
#include "generator.h"
#include "graph.h"
#include "platform_atomics.h"
#include "pvector.h"
#include "reader.h"
#include "timer.h"
#include "util.h"


/*
GAP Benchmark Suite
Class:  BuilderBase
Author: Scott Beamer

Given arguments from the command line (cli), returns a built graph
 - MakeGraph() will parse cli and obtain edgelist and call
   MakeGraphFromEL(edgelist) to perform actual graph construction
 - edgelist can be from file (reader) or synthetically generated (generator)
 - Common case: BuilderBase typedef'd (w/ params) to be Builder (benchmark.h)
*/


template <typename NodeID_, typename DestID_ = NodeID_,
          typename WeightT_ = NodeID_, bool invert = true>
class BuilderBase {
  typedef EdgePair<NodeID_, DestID_> Edge;
  typedef pvector<Edge> EdgeList;

  const CLBase &cli_;
  bool symmetrize_;       // build an undirected graph (each edge both ways)
  bool needs_weights_;    // true when DestID_ carries a weight NodeID_ lacks
  //int64_t num_nodes_ = -1;
  // 0 doubles as "not yet determined" (changed from the -1 sentinel above).
  uint64_t num_nodes_ = 0;

 public:
  explicit BuilderBase(const CLBase &cli) : cli_(cli) {
    symmetrize_ = cli_.symmetrize();
    needs_weights_ = !std::is_same<NodeID_, DestID_>::value;
  }

  // Returns the value to store for the reverse direction of edge e.
  // Unweighted overload: just the source vertex id.
  DestID_ GetSource(EdgePair<NodeID_, NodeID_> e) {
    return e.u;
  }

  // Weighted overload: the source id paired with the edge's weight, so the
  // reverse edge keeps the same weight.
  DestID_ GetSource(EdgePair<NodeID_, NodeWeight<NodeID_, WeightT_>> e) {
    return NodeWeight<NodeID_, WeightT_>(e.u, e.v.w);
  }

  // Largest vertex id appearing in the edge list (either endpoint);
  // used to size the graph when the node count is unknown.
  NodeID_ FindMaxNodeID(const EdgeList &el) {
    NodeID_ max_seen = 0;
    #pragma omp parallel for reduction(max : max_seen)
    for (auto it = el.begin(); it < el.end(); it++) {
      Edge e = *it;
      max_seen = std::max(max_seen, e.u);
      max_seen = std::max(max_seen, (NodeID_) e.v);
    }
    return max_seen;
  }

  // Per-vertex degree counts for the requested direction.  transpose=true
  // counts in-degrees (destinations); when symmetrizing, both endpoints are
  // counted.  fetch_and_add keeps the concurrent increments safe.
  pvector<NodeID_> CountDegrees(const EdgeList &el, bool transpose) {
    pvector<NodeID_> degrees(num_nodes_, 0);
    #pragma omp parallel for
    for (auto it = el.begin(); it < el.end(); it++) {
      Edge e = *it;
      if (symmetrize_ || (!symmetrize_ && !transpose))
        fetch_and_add(degrees[e.u], 1);
      if (symmetrize_ || (!symmetrize_ && transpose))
        fetch_and_add(degrees[(NodeID_) e.v], 1);
    }
    return degrees;
  }

  // Sequential exclusive prefix sum of degrees; sums[i] is the CSR offset of
  // vertex i and sums[size] is the total edge count.
  static pvector<SGOffset> PrefixSum(const pvector<NodeID_> &degrees) {
    pvector<SGOffset> sums(degrees.size() + 1);
    SGOffset total = 0;
    for (size_t n=0; n < degrees.size(); n++) {
      sums[n] = total;
      total += degrees[n];
    }
    sums[degrees.size()] = total;
    return sums;
  }

  // Parallel exclusive prefix sum: per-block local sums in parallel, a
  // sequential scan over the (few) block totals, then a parallel sweep that
  // turns each block's local sums into global offsets.
  static pvector<SGOffset> ParallelPrefixSum(const pvector<NodeID_> &degrees) {
    const size_t block_size = 1<<20;
    const size_t num_blocks = (degrees.size() + block_size - 1) / block_size;
    pvector<SGOffset> local_sums(num_blocks);
    #pragma omp parallel for
    for (size_t block=0; block < num_blocks; block++) {
      SGOffset lsum = 0;
      size_t block_end = std::min((block + 1) * block_size, degrees.size());
      for (size_t i=block * block_size; i < block_end; i++)
        lsum += degrees[i];
      local_sums[block] = lsum;
    }
    pvector<SGOffset> bulk_prefix(num_blocks+1);
    SGOffset total = 0;
    for (size_t block=0; block < num_blocks; block++) {
      bulk_prefix[block] = total;
      total += local_sums[block];
    }
    bulk_prefix[num_blocks] = total;
    pvector<SGOffset> prefix(degrees.size() + 1);
    #pragma omp parallel for
    for (size_t block=0; block < num_blocks; block++) {
      SGOffset local_total = bulk_prefix[block];
      size_t block_end = std::min((block + 1) * block_size, degrees.size());
      for (size_t i=block * block_size; i < block_end; i++) {
        prefix[i] = local_total;
        local_total += degrees[i];
      }
    }
    prefix[degrees.size()] = bulk_prefix[num_blocks];
    // debug trace left in by a previous author
    std::cerr << "prefix[0] : " << prefix[0] << " , prefix[last] : "
              << prefix[degrees.size()] << " nodes : " << degrees.size() << "\n";
    return prefix;
  }

  // Removes self-loops and redundant edges
  // Side effect: neighbor IDs will be sorted
  // Two passes: first sort/unique/remove per vertex to measure the squished
  // degree (diffs), then copy the surviving neighbors into freshly sized CSR
  // storage (*sq_index / *sq_neighs, owned by the caller).
  void SquishCSR(const CSRGraph<NodeID_, DestID_, invert> &g, bool transpose,
                 DestID_*** sq_index, DestID_** sq_neighs) {
    pvector<NodeID_> diffs(g.num_nodes());
    DestID_ *n_start, *n_end;
    #pragma omp parallel for private(n_start, n_end)
    for (NodeID_ n=0; n < g.num_nodes(); n++) {
      if (transpose) {
        n_start = g.in_neigh(n).begin();
        n_end = g.in_neigh(n).end();
      } else {
        n_start = g.out_neigh(n).begin();
        n_end = g.out_neigh(n).end();
      }
      std::sort(n_start, n_end);
      DestID_ *new_end = std::unique(n_start, n_end);
      // drop self-loops (neighbor == n) after deduplication
      new_end = std::remove(n_start, new_end, n);
      diffs[n] = new_end - n_start;
    }
    pvector<SGOffset> sq_offsets = ParallelPrefixSum(diffs);
    *sq_neighs = new DestID_[sq_offsets[g.num_nodes()]];
    *sq_index = CSRGraph<NodeID_, DestID_>::GenIndex(sq_offsets, *sq_neighs);
    #pragma omp parallel for private(n_start)
    for (NodeID_ n=0; n < g.num_nodes(); n++) {
      if (transpose)
        n_start = g.in_neigh(n).begin();
      else
        n_start = g.out_neigh(n).begin();
      std::copy(n_start, n_start+diffs[n], (*sq_index)[n]);
    }
  }

  // Rebuilds g without self-loops or duplicate edges (see SquishCSR).
  // NOTE(review): when g is directed and invert==false, in_index/in_neighs
  // are passed to the constructor uninitialized — presumably the CSRGraph
  // ctor ignores them in that configuration; confirm against graph.h.
  CSRGraph<NodeID_, DestID_, invert> SquishGraph(
      const CSRGraph<NodeID_, DestID_, invert> &g) {
    DestID_ **out_index, *out_neighs, **in_index, *in_neighs;
    SquishCSR(g, false, &out_index, &out_neighs);
    if (g.directed()) {
      if (invert)
        SquishCSR(g, true, &in_index, &in_neighs);
      return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), out_index,
                                                out_neighs, in_index,
                                                in_neighs);
    } else {
      return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), out_index,
                                                out_neighs);
    }
  }

  /*
  Graph Building Steps (for CSR):
    - Read edgelist once to determine vertex degrees (CountDegrees)
    - Determine vertex offsets by a prefix sum (ParallelPrefixSum)
    - Allocate storage and set points according to offsets (GenIndex)
    - Copy edges into storage
  */
  // Builds one direction of the CSR; offsets double as per-vertex insertion
  // cursors (advanced atomically), which is why edges within a neighbor list
  // end up in nondeterministic order.
  void MakeCSR(const EdgeList &el, bool transpose, DestID_*** index,
               DestID_** neighs) {
    pvector<NodeID_> degrees = CountDegrees(el, transpose);
    pvector<SGOffset> offsets = ParallelPrefixSum(degrees);
    *neighs = new DestID_[offsets[num_nodes_]];
    *index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, *neighs);
    #pragma omp parallel for
    for (auto it = el.begin(); it < el.end(); it++) {
      Edge e = *it;
      if (symmetrize_ || (!symmetrize_ && !transpose))
        (*neighs)[fetch_and_add(offsets[e.u], 1)] = e.v;
      if (symmetrize_ || (!symmetrize_ && transpose))
        (*neighs)[fetch_and_add(offsets[static_cast<NodeID_>(e.v)], 1)] =
            GetSource(e);
    }
  }

  // Full construction from an in-memory edge list: determine node count if
  // needed, add weights if needed, build out-CSR and (for directed graphs
  // with invert) the in-CSR.
  CSRGraph<NodeID_, DestID_, invert> MakeGraphFromEL(EdgeList &el) {
    DestID_ **index = nullptr, **inv_index = nullptr;
    DestID_ *neighs = nullptr, *inv_neighs = nullptr;
    Timer t;
    t.Start();
    //if (num_nodes_ == -1)
    if (num_nodes_ == 0)
      num_nodes_ = FindMaxNodeID(el)+1;
    if (needs_weights_)
      Generator<NodeID_, DestID_, WeightT_>::InsertWeights(el);
    MakeCSR(el, false, &index, &neighs);
    if (!symmetrize_ && invert)
      MakeCSR(el, true, &inv_index, &inv_neighs);
    t.Stop();
    PrintTime("Build Time", t.Seconds());
    if (symmetrize_)
      // symmetrized graph is undirected; no inverse CSR needed
      return CSRGraph<NodeID_, DestID_, invert>(num_nodes_, index, neighs);
    else
      return CSRGraph<NodeID_, DestID_, invert>(num_nodes_, index, neighs,
                                                inv_index, inv_neighs);
  }

  // Loads a directed graph from a pair of binary .gr files (graph plus its
  // pre-computed transpose).  File layout: 4 uint64 header words
  // [version, ?, numNodes, numEdges], then numNodes offsets (exclusive of
  // the implicit leading 0), then the destination array whose element width
  // depends on the version (v1: uint32, v2: uint64), padded to 8 bytes.
  // NOTE(review): numNodes is read into a uint32_t from a uint64_t header
  // word — silently truncates graphs with >= 2^32 nodes; confirm intended.
  CSRGraph<NodeID_, DestID_, invert> MakeGraphFromGR(
      std::string &filename, std::string &filename_transpose) {
    DestID_ **index = nullptr, **inv_index = nullptr;
    DestID_ *neighs = nullptr, *inv_neighs = nullptr;
    Timer t;
    t.Start();
    std::ifstream in(filename);
    if (!in.is_open()) {
      std::cout << "Couldn't open file " << filename << std::endl;
      std::exit(-2);
    }
    Timer timer_ggr;
    timer_ggr.Start();
    uint64_t header[4];
    in.read(reinterpret_cast<char*>(header), sizeof(uint64_t) * 4);
    uint64_t version = header[0];
    uint32_t numNodes = header[2];
    uint64_t numEdges = header[3];
    std::cout<<"Disk: NumNodes: "<<numNodes<<" NumEdges: "<<numEdges<<"\n";
    std::cerr<<"Disk: NumNodes: "<<numNodes<<" NumEdges: "<<numEdges<<"\n";
    num_nodes_ = numNodes;
    pvector<SGOffset> offsets(numNodes+1);
    uint64_t readPosition = (4 * sizeof(uint64_t));
    in.seekg(readPosition);
    offsets[0] = 0;
    // the file stores offsets[1..numNodes]; offsets[0] is implicitly 0
    in.read(reinterpret_cast<char*>(&offsets[1]), sizeof(SGOffset)*(numNodes));
    std::cerr << " offsets[last]: " << offsets[numNodes] << "\n";
    neighs = new DestID_[numEdges];
    index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, neighs);
    readPosition = ((4 + numNodes) * sizeof(uint64_t));
    in.seekg(readPosition);
    std::cout << "version = " << version << "\n";
    std::cerr << "version = " << version << "\n";
    if(version == 1) {
      // v1 stores 32-bit destinations
      in.read(reinterpret_cast<char*>((neighs)), sizeof(uint32_t)*numEdges);
      readPosition = ((4 + numNodes) * sizeof(uint64_t)
                      + numEdges * sizeof(uint32_t));
      // version 1 padding TODO make version agnostic
      if (numEdges% 2) {
        readPosition += sizeof(uint32_t);
      }
    } else if(version == 2) {
      // v2 stores 64-bit destinations
      in.read(reinterpret_cast<char*>((neighs)), sizeof(uint64_t)*numEdges);
      readPosition = ((4 + numNodes) * sizeof(uint64_t)
                      + numEdges * sizeof(uint64_t));
      if (numEdges % 2) {
        readPosition += sizeof(uint64_t);
      }
    } else {
      std::cerr << "ERROR: Unknown graph file version.\n";
      abort();
    }
    std::cout << "Done constructing original graph\n";
    // Same layout for the transpose file; offsets vector is reused.
    std::ifstream in_transpose(filename_transpose);
    std::cerr << "read transpose \n";
    if (!in_transpose.is_open()) {
      std::cout << "Couldn't open file " << filename_transpose << std::endl;
      std::exit(-2);
    }
    readPosition = (4 * sizeof(uint64_t));
    offsets[0] = 0;
    in_transpose.seekg(readPosition);
    in_transpose.read(reinterpret_cast<char*>(&offsets[1]),
                      sizeof(SGOffset)*(numNodes));
    std::cerr << "read transpose offset \n";
    inv_neighs = new DestID_[numEdges];
    inv_index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, inv_neighs);
    std::cerr << "read transpose dst\n";
    readPosition = ((4 + numNodes) * sizeof(uint64_t));
    in_transpose.seekg(readPosition);
    if(version == 1) {
      in_transpose.read(reinterpret_cast<char*>((inv_neighs)),
                        sizeof(uint32_t)*numEdges);
      readPosition = ((4 + numNodes) * sizeof(uint64_t)
                      + numEdges * sizeof(uint32_t));
      // version 1 padding TODO make version agnostic
      if (numEdges% 2) {
        readPosition += sizeof(uint32_t);
      }
    } else if(version == 2) {
      in_transpose.read(reinterpret_cast<char*>((inv_neighs)),
                        sizeof(uint64_t)*numEdges);
      readPosition = ((4 + numNodes) * sizeof(uint64_t)
                      + numEdges * sizeof(uint64_t));
      if (numEdges % 2) {
        readPosition += sizeof(uint64_t);
      }
    } else {
      std::cerr << "ERROR: Unknown graph file version.\n";
      abort();
    }
    std::cout << "Done constructing transpose graph\n";
    return CSRGraph<NodeID_, DestID_, invert>(num_nodes_, index, neighs,
                                              inv_index, inv_neighs);
  }

  // Entry point: builds the graph per the command line — from a binary .gr
  // pair, a serialized .sg/.wsg, a text edge list, or a synthetic generator.
  CSRGraph<NodeID_, DestID_, invert> MakeGraph(bool removeDuplicateEdges = true,
                                               bool useTranspose = false) {
    CSRGraph<NodeID_, DestID_, invert> g;
    if(useTranspose){
      std::string fname = cli_.filename();
      std::string ftname = cli_.filename_transpose();
      std::cout << "Original graph : " << fname << "\n";
      std::cout << "Transpose graph : " << ftname << "\n";
      g = MakeGraphFromGR(fname, ftname);
    } else {
      {  // extra scope to trigger earlier deletion of el (save memory)
        EdgeList el;
        if (cli_.filename() != "") {
          Reader<NodeID_, DestID_, WeightT_, invert> r(cli_.filename());
          if ((r.GetSuffix() == ".sg") || (r.GetSuffix() == ".wsg")) {
            return r.ReadSerializedGraph();
          } else {
            el = r.ReadFile(needs_weights_);
          }
        } else if (cli_.scale() != -1) {
          Generator<NodeID_, DestID_> gen(cli_.scale(), cli_.degree());
          el = gen.GenerateEL(cli_.uniform());
        }
        g = MakeGraphFromEL(el);
      }
    }
    if(removeDuplicateEdges)
      return SquishGraph(g);
    else
      return g;
  }

  // Relabels (and rebuilds) graph by order of decreasing degree
  // Only valid for undirected graphs (a directed graph would need both
  // directions rebuilt consistently).
  static CSRGraph<NodeID_, DestID_, invert> RelabelByDegree(
      const CSRGraph<NodeID_, DestID_, invert> &g) {
    if (g.directed()) {
      std::cout << "Cannot relabel directed graph" << std::endl;
      std::exit(-11);
    }
    Timer t;
    t.Start();
    typedef std::pair<int64_t, NodeID_> degree_node_p;
    pvector<degree_node_p> degree_id_pairs(g.num_nodes());
    #pragma omp parallel for
    for (NodeID_ n=0; n < g.num_nodes(); n++)
      degree_id_pairs[n] = std::make_pair(g.out_degree(n), n);
    std::sort(degree_id_pairs.begin(), degree_id_pairs.end(),
              std::greater<degree_node_p>());
    pvector<NodeID_> degrees(g.num_nodes());
    pvector<NodeID_> new_ids(g.num_nodes());
    #pragma omp parallel for
    for (NodeID_ n=0; n < g.num_nodes(); n++) {
      degrees[n] = degree_id_pairs[n].first;
      new_ids[degree_id_pairs[n].second] = n;  // old id -> new id
    }
    pvector<SGOffset> offsets = ParallelPrefixSum(degrees);
    DestID_* neighs = new DestID_[offsets[g.num_nodes()]];
    DestID_** index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, neighs);
    #pragma omp parallel for
    for (NodeID_ u=0; u < g.num_nodes(); u++) {
      for (NodeID_ v : g.out_neigh(u))
        neighs[offsets[new_ids[u]]++] = new_ids[v];
      std::sort(index[new_ids[u]], index[new_ids[u]+1]);
    }
    t.Stop();
    PrintTime("Relabel", t.Seconds());
    return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), index, neighs);
  }
};

#endif  // BUILDER_H_
kernels.h
template<typename divisor_type> void throughput_test( const int n, divisor_type d1, divisor_type d2, divisor_type d3, int dummy, int * buf) { #pragma omp target teams distribute parallel for thread_limit(256) \ map(to: n, d1, d2, d3, dummy) map(alloc: buf[0:1]) for (int x = 0; x < n; x++) { int x1 = x / d1; int x2 = x / d2; int x3 = x / d3; int aggregate = x1 + x2 + x3; if (aggregate & dummy == 1) buf[0] = aggregate; } } template<typename divisor_type> void latency_test( const int n, divisor_type d1, divisor_type d2, divisor_type d3, divisor_type d4, divisor_type d5, divisor_type d6, divisor_type d7, divisor_type d8, divisor_type d9, divisor_type d10, int dummy, int * buf) { #pragma omp target teams distribute parallel for thread_limit(256) \ map(to: n, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, dummy) map(alloc: buf[0:1]) for (int x = 0; x < n; x++) { x /= d1; x /= d2; x /= d3; x /= d4; x /= d5; x /= d6; x /= d7; x /= d8; x /= d9; x /= d10; if (x & dummy == 1) buf[0] = x; } } void check(const int n, int_fastdiv divisor, int * results) { #pragma omp target teams distribute parallel for thread_limit(256) for (int divident = 0; divident < n; divident++) { int quotient = divident / (int)divisor; int fast_quotient = divident / divisor; if (quotient != fast_quotient) { int error_id; #pragma omp atomic capture error_id = results[0]++; if (error_id == 0) { results[1] = divident; results[2] = quotient; results[3] = fast_quotient; } } divident = -divident; quotient = divident / (int)divisor; fast_quotient = divident / divisor; if (quotient != fast_quotient) { int error_id; #pragma omp atomic capture error_id = results[0]++; if (error_id == 0) { results[1] = divident; results[2] = quotient; results[3] = fast_quotient; } } } }
convolution_pack8to4_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Direct fp16 convolution, input packed 8 channels/element, output packed 4.
// For each output element it accumulates maxk * channels fused
// multiply-adds: an 8-lane input vector times eight 4-wide weight columns.
// Assumes top_blob is pre-sized to (outw, outh, outch) and weight_data_fp16
// holds per-output-channel blocks of maxk*32 half floats — TODO confirm
// against the weight-packing code in the caller.
static void convolution_pack8to4_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data_fp16, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets
    // space_ofs[k] is the (dilated) element offset of tap k relative to the
    // window's top-left corner; gap skips to the next kernel row.
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    // null when the layer has no bias
    const __fp16* bias_data_ptr = bias_data_fp16;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // accumulator: 4 packed output channels, seeded with bias
                float16x4_t _sum = vdup_n_f16((__fp16)0.f);

                if (bias_data_ptr)
                {
                    _sum = vld1_f16(bias_data_ptr + p * 4);
                }

                const __fp16* kptr = weight_data_fp16.channel(p);

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    // 8 input channels packed per spatial element
                    const __fp16* sptr = m.row<const __fp16>(i * stride_h) + j * stride_w * 8;

                    for (int k = 0; k < maxk; k++)
                    {
                        float16x8_t _val = vld1q_f16(sptr + space_ofs[k] * 8);

                        // eight 4-wide weight columns, one per input lane
                        float16x4_t _w0 = vld1_f16(kptr);
                        float16x4_t _w1 = vld1_f16(kptr + 4);
                        float16x4_t _w2 = vld1_f16(kptr + 8);
                        float16x4_t _w3 = vld1_f16(kptr + 12);
                        float16x4_t _w4 = vld1_f16(kptr + 16);
                        float16x4_t _w5 = vld1_f16(kptr + 20);
                        float16x4_t _w6 = vld1_f16(kptr + 24);
                        float16x4_t _w7 = vld1_f16(kptr + 28);

                        // _sum += _wL * _val[lane L] for each of the 8 lanes
                        _sum = vfma_laneq_f16(_sum, _w0, _val, 0);
                        _sum = vfma_laneq_f16(_sum, _w1, _val, 1);
                        _sum = vfma_laneq_f16(_sum, _w2, _val, 2);
                        _sum = vfma_laneq_f16(_sum, _w3, _val, 3);
                        _sum = vfma_laneq_f16(_sum, _w4, _val, 4);
                        _sum = vfma_laneq_f16(_sum, _w5, _val, 5);
                        _sum = vfma_laneq_f16(_sum, _w6, _val, 6);
                        _sum = vfma_laneq_f16(_sum, _w7, _val, 7);

                        kptr += 32;
                    }
                }

                // optional activation (relu/leaky/etc. selected by type)
                _sum = activation_ps(_sum, activation_type, activation_params);

                vst1_f16(outptr + j * 4, _sum);
            }

            outptr += outw * 4;
        }
    }
}
writer.h
// ========================================================================== // SeqAn - The Library for Sequence Analysis // ========================================================================== // Copyright (c) 2006-2010, Knut Reinert, FU Berlin // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of Knut Reinert or the FU Berlin nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY // OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
//
// ==========================================================================
// Author: Enrico Siragusa <enrico.siragusa@fu-berlin.de>
// ==========================================================================
// This file contains the Writer class.
// ==========================================================================

#ifndef SEQAN_EXTRAS_MASAI_WRITER_H_
#define SEQAN_EXTRAS_MASAI_WRITER_H_

#include <seqan/basic.h>
#include <seqan/sequence.h>
#include <seqan/file.h>
#include <seqan/stream.h>

#include "tags.h"
#include "sequence_index.h"
#include "matches.h"

using namespace seqan;

// ============================================================================
// Forwards
// ============================================================================

// ============================================================================
// Tags, Classes, Enums
// ============================================================================

// ----------------------------------------------------------------------------
// Class Writer
// ----------------------------------------------------------------------------

// Primary template; only the Sam and Raw format specializations below are
// defined.
template <typename TGenome, typename TReads, typename TFormat, typename TDistance, typename TSpec = void>
struct Writer {};

// SAM-format writer: serializes matches as BamAlignmentRecords through a
// BamIOContext built from the fragment store's contig names.
template <typename TGenome, typename TReads, typename TDistance, typename TSpec>
struct Writer<TGenome, TReads, Sam, TDistance, TSpec>
{
    typedef BamIOContext<TFragmentStore::TContigNameStore> TBamIOContext;
    //typedef unsigned long TWord; //nhtran: moved to "sequence_index.h"
    typedef Stream<FileStream<File<> > > TStream;

    TFragmentStore & store;
    TGenome & genome;
    TReads & reads;
    TStream _stream;            // output stream the records are written to
    TBamIOContext _context;
    bool disabled;              // when true, onMatch() is a no-op
    bool _writeCigar;           // emit CIGAR strings (requires re-alignment)
    //String<TWord> _readAligned; //nhtran: moved to object "reads"
    //const unsigned _wordLen; //nhtran: moved to "sequence_index.h"

    Writer(TFragmentStore & store, TGenome & genome, TReads & reads, bool disabled = false) :
        store(store),
        genome(genome),
        reads(reads),
        _context(store.contigNameStore, store.contigNameStoreCache),
        disabled(disabled),
        _writeCigar(true)
        //_wordLen(BitsPerValue<TWord>::VALUE) //nhtran: moved to "sequence_index.h"
    {}
};

// Raw-format writer: dumps binary Match<> records directly.
template <typename TGenome, typename TReads, typename TDistance, typename TSpec>
struct Writer<TGenome, TReads, Raw, TDistance, TSpec>
{
    typedef Match<> TMatch;
    typedef Stream<FileStream<File<>, TMatch> > TStream;

    TFragmentStore & store;
    TGenome & genome;
    TReads & reads;
    TStream _stream;
    bool disabled;

    Writer(TFragmentStore & store, TGenome & genome, TReads & reads, bool disabled = false) :
        store(store),
        genome(genome),
        reads(reads),
        disabled(disabled)
    {}
};

// ============================================================================
// Metafunctions
// ============================================================================

// ============================================================================
// Functions
// ============================================================================

// ----------------------------------------------------------------------------
// Function onMatch()                                              [Writer<Sam>]
// ----------------------------------------------------------------------------

// Single-End

// Raw variant: pack the match into a binary record and append it to the
// stream.  The omp critical section serializes writes from concurrent
// mapper threads.
template <typename TGenome, typename TReads, typename TDistance, typename TSpec,
          typename TContigId, typename TContigPos, typename TReadId, typename TErrors>
inline void
onMatch(Writer<TGenome, TReads, Raw, TDistance, TSpec> & writer,
        TContigId contigId,
        TContigPos beginPos,
        TContigPos endPos,
        TReadId readId,
        TErrors errors,
        bool reverseComplemented)
{
    if (writer.disabled)
        return;

    // Fill record.
    Match<> match;
    fill(match, contigId, beginPos, endPos, readId, errors, reverseComplemented);

    // Write record.
    #pragma omp critical
    {
        streamWriteChar(writer._stream, match);
    }

    return;
}

// SAM variant: build the aligned-read bookkeeping, optionally compute the
// alignment (only needed for CIGAR output), mark secondary alignments, and
// write one BamAlignmentRecord.  Writing is serialized via omp critical.
template <typename TGenome, typename TReads, typename TDistance, typename TSpec,
          typename TContigId, typename TContigPos, typename TReadId, typename TErrors>
inline void
onMatch(Writer<TGenome, TReads, Sam, TDistance, TSpec> & writer,
        TContigId contigId,
        TContigPos beginPos,
        TContigPos endPos,
        TReadId readId,
        TErrors errors,
        bool reverseComplemented)
{
    if (writer.disabled)
        return;

    typedef Align<TFragmentStore::TReadSeq, ArrayGaps> TAlign;

    TAlignedReadStoreElement alignedRead;
    TAlignedReadStoreElement alignedMate;
    TAlignQualityStoreElement alignQuality;
    TAlignedReadTagStoreElement alignedTags;

    // Fill aligned read.
    _fillAlignedRead(alignedRead, alignQuality, contigId, beginPos, endPos, readId, errors, reverseComplemented);

    // Check for secondary alignment.
    bool secondary = _checkSecondary(writer, alignedRead);

    // Align read only if cigar output is enabled.
    TAlign align;
    if (writer._writeCigar)
        _alignRead(writer, align, alignedRead, alignQuality, reverseComplemented);

    // Write aligned read.
//    _writeAlignedRead(writer.store, writer._stream, writer.context,
//                      alignedRead, alignQuality, alignedTags,
//                      alignedMate, align, secondary, Sam());

    // Fill record.
    BamAlignmentRecord record;
    _fillRecord(writer.store, record, alignedRead, alignQuality, alignedTags,
                alignedMate, align, secondary, writer._writeCigar);

    //writer.reads.readsAlignments.at(readId).push_back(record);

    // Write record to target.
    #pragma omp critical
    {
        write2(writer._stream, record, writer._context, Sam());
    }
}

// ----------------------------------------------------------------------------
// Function _fillAlignedRead()
// ----------------------------------------------------------------------------

// Populates the fragment-store aligned-read element.  For reverse-complement
// matches, begin/end are swapped — the store encodes orientation through
// beginPos > endPos.
template <typename TContigId, typename TContigPos, typename TReadId, typename TErrors>
inline void
_fillAlignedRead(TAlignedReadStoreElement & alignedRead,
                 TAlignQualityStoreElement & alignQuality,
                 TContigId contigId,
                 TContigPos beginPos,
                 TContigPos endPos,
                 TReadId readId,
                 TErrors errors,
                 bool reverseComplemented)
{
    alignedRead.readId = readId;
    alignedRead.contigId = contigId;

    if (reverseComplemented)
    {
        alignedRead.beginPos = endPos;
        alignedRead.endPos = beginPos;
    }
    else
    {
        alignedRead.beginPos = beginPos;
        alignedRead.endPos = endPos;
    }

    alignQuality.errors = errors;
}

// ----------------------------------------------------------------------------
// Function _checkSecondary()                                      [Writer<Sam>]
// ----------------------------------------------------------------------------

// Tests and sets the per-read "already aligned" bit in the shared bitset
// (one bit per read, TWordLen bits per word; TWord/TWordLen come from
// sequence_index.h).  Returns true when the read already had an alignment,
// i.e. this one is secondary.  NOTE(review): the read-modify-write on
// _readAligned is not synchronized — presumably acceptable for a racy flag,
// but confirm against the callers.
template <typename TGenome, typename TReads, typename TDistance, typename TSpec>
inline bool
_checkSecondary(Writer<TGenome, TReads, Sam, TDistance, TSpec> const & writer,
                TAlignedReadStoreElement & alignedRead)
{
    TWord mask = (TWord)1 << (alignedRead.readId % TWordLen);

    bool secondary = (writer.reads._readAligned[alignedRead.readId / TWordLen] & mask) != 0;

    writer.reads._readAligned[alignedRead.readId / TWordLen] |= mask;

    return secondary;
}

// ----------------------------------------------------------------------------
// Function _alignRead()                                 [Writer<EditDistance>]
// ----------------------------------------------------------------------------

template <typename TGenome, typename TReads, typename TFormat, typename TSpec, typename TAlign>
inline void
_alignRead(Writer<TGenome, TReads, TFormat, EditDistance, TSpec> const & writer,
           TAlign & align,
           TAlignedReadStoreElement & alignedRead,
           TAlignQualityStoreElement & alignQuality,
           bool
reverseComplemented) { typedef TFragmentStore::TReadSeq TReadSeq; resize(rows(align), 2); assignSource(row(align, 0), infix(writer.store.contigStore[alignedRead.contigId].seq, std::min(alignedRead.beginPos, alignedRead.endPos), std::max(alignedRead.beginPos, alignedRead.endPos))); TReadSeqStoreSize readId = alignedRead.readId; if (reverseComplemented) readId += writer.reads.readsCount; TReadSeq const & readSeq = writer.store.readSeqStore[readId]; assignSource(row(align, 1), readSeq); // In this case no indels are possible. if ((alignQuality.errors <= 1) && (length(row(align, 0)) == length(row(align, 1)))) return; globalAlignment(align, Score<short, EditDistance>(), (short)-alignQuality.errors, (short)alignQuality.errors, NeedlemanWunsch()); } // ---------------------------------------------------------------------------- // Function open() [Writer] // ---------------------------------------------------------------------------- template <typename TGenome, typename TReads, typename TFormat, typename TDistance, typename TSpec, typename TString> bool open(Writer<TGenome, TReads, TFormat, TDistance, TSpec> & writer, TString const & fileName) { if (writer.disabled) return true; if (!open(writer._stream, toCString(fileName), OPEN_RDWR | OPEN_CREATE)) return false; _writeHeader(writer); return true; } // ---------------------------------------------------------------------------- // Function _writeHeader() [Writer] // ---------------------------------------------------------------------------- template <typename TGenome, typename TReads, typename TDistance, typename TSpec> void _writeHeader(Writer<TGenome, TReads, Sam, TDistance, TSpec> & writer) { _writeHeader(writer.store, writer._stream, writer._context, Sam()); } template <typename TGenome, typename TReads, typename TDistance, typename TSpec> void _writeHeader(Writer<TGenome, TReads, Raw, TDistance, TSpec> & writer) { } // ---------------------------------------------------------------------------- // Function close() 
[Writer] // ---------------------------------------------------------------------------- template <typename TGenome, typename TReads, typename TFormat, typename TDistance, typename TSpec> bool close(Writer<TGenome, TReads, TFormat, TDistance, TSpec> & writer) { if (writer.disabled) return true; return close(writer._stream); } // ---------------------------------------------------------------------------- // Function writeAlignments() [Writer] // ---------------------------------------------------------------------------- /* template <typename TGenome, typename TReads, typename TDistance, typename TSpec> void writeAlignments(Writer<TGenome, TReads, Sam, TDistance, TSpec> & writer) { if (writer.disabled) return; //#pragma omp parallel for for (TReadSeqStoreSize read_ID = 0; read_ID < writer.reads.readsCount; ++read_ID) { //#pragma omp critical { for (TAlignmentRecords::iterator it = writer.reads.readsAlignments.at(read_ID).begin(); it != writer.reads.readsAlignments.at(read_ID).end(); ++it) { write2(writer._stream, *it, writer._context, Sam()); } } } } */ #endif // #ifndef SEQAN_EXTRAS_MASAI_WRITER_H_
dist.h
/******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved.                      *
* This file is part of the LIBXSMM library.                                   *
*                                                                             *
* For information on the license, see the LICENSE file.                      *
* Further information: https://github.com/hfp/libxsmm/                        *
* SPDX-License-Identifier: BSD-3-Clause                                       *
******************************************************************************/
/* Dhiraj Kalamkar (Intel Corp.)
******************************************************************************/

#ifndef _DIST_H_
#define _DIST_H_

/* Thin distributed-communication shim.  The same six entry points are backed
 * by MPI, oneCCL, or a trivial single-process fallback, selected at compile
 * time via USE_MPI / USE_CCL.
 *
 * All functions are defined `inline`: this header defines (not merely
 * declares) them, and without `inline` including it from more than one
 * translation unit produces multiple-definition link errors. */

#ifdef USE_MPI

#include <mpi.h>

/* Initialize MPI; argc/argv are forwarded so the runtime may strip its own
 * command-line flags. */
inline void dist_init(int *argc, char ***argv)
{
  MPI_Init(argc, argv);
}

/* Shut down the MPI runtime. */
inline void dist_fini()
{
  MPI_Finalize();
}

/* Rank of the calling process within MPI_COMM_WORLD. */
inline int dist_get_rank()
{
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  return rank;
}

/* Number of processes in MPI_COMM_WORLD. */
inline int dist_get_size()
{
  int size;
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  return size;
}

/* Block until every rank has reached the barrier. */
inline void dist_barrier()
{
  MPI_Barrier(MPI_COMM_WORLD);
}

/* All-to-all exchange of `count` floats per peer; sendbuf/recvbuf must each
 * hold count * dist_get_size() elements. */
inline void dist_alltoall(int count, float *sendbuf, float *recvbuf)
{
  MPI_Alltoall(sendbuf, count, MPI_FLOAT, recvbuf, count, MPI_FLOAT,
               MPI_COMM_WORLD);
}

#elif defined(USE_CCL)

#include <ccl.hpp>

/* One communicator per translation unit; internal linkage (`static`) keeps
 * each TU's handle private. */
static ccl::communicator_t comm;

/* oneCCL does not consume the command line; the parameters exist only to
 * keep the shim signature uniform across backends. */
inline void dist_init(int *argc, char ***argv)
{
  (void)argc;
  (void)argv;
  comm = ccl::environment::instance().create_communicator();
}

/* Release the communicator. */
inline void dist_fini()
{
  comm.reset();
}

inline int dist_get_rank()
{
  return comm->rank();
}

inline int dist_get_size()
{
  return comm->size();
}

inline void dist_barrier()
{
  comm->barrier();
}

/* Blocking all-to-all: issue the collective and wait for completion. */
inline void dist_alltoall(int count, float *sendbuf, float *recvbuf)
{
  comm->alltoall(sendbuf, recvbuf, (size_t)count, ccl::datatype::dt_float)->wait();
}

#else /* single-process fallback: no communication library available */

inline void dist_init(int *argc, char ***argv)
{
  (void)argc;
  (void)argv;
}

inline void dist_fini()
{
}

/* A lone process is always rank 0 of a world of size 1. */
inline int dist_get_rank()
{
  return 0;
}

inline int dist_get_size()
{
  return 1;
}

inline void dist_barrier()
{
}

/* With a single process, all-to-all degenerates to a plain copy of the
 * `count` elements destined for "ourselves". */
inline void dist_alltoall(int count, float *sendbuf, float *recvbuf)
{
#pragma omp parallel for
  for (int i = 0; i < count; i++) {
    recvbuf[i] = sendbuf[i];
  }
}

#endif

#endif /* _DIST_H_ */
GB_unop__identity_uint8_uint64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop_apply__identity_uint8_uint64
// op(A') function: GB_unop_tran__identity_uint8_uint64

// C type:   uint8_t
// A type:   uint64_t
// cast:     uint8_t cij = (uint8_t) aij
// unaryop:  cij = aij

// A entry type (uint64_t)
#define GB_ATYPE \
    uint64_t

// C entry type (uint8_t)
#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: z is just x)
#define GB_OP(z, x) \
    z = x ;

// casting (narrowing uint64_t -> uint8_t; high bits are discarded)
#define GB_CAST(z, aij) \
    uint8_t z = (uint8_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint8_t z = (uint8_t) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise apply over anz entries, statically scheduled across nthreads.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE), telling
// the caller to fall back to the generic implementation.
GrB_Info GB_unop_apply__identity_uint8_uint64
(
    uint8_t *Cx,        // Cx and Ax may be aliased
    const uint64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint64_t aij = Ax [p] ;
        uint8_t z = (uint8_t) aij ;
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body lives in GB_unop_transpose.c; the macros above
// (GB_GETA, GB_CAST_OP, ...) specialize it for this type pair when included.
GrB_Info GB_unop_tran__identity_uint8_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
adjvectorbqm.h
// Copyright 2020 D-Wave Systems Inc.
//
//    Licensed under the Apache License, Version 2.0 (the "License");
//    you may not use this file except in compliance with the License.
//    You may obtain a copy of the License at
//
//        http://www.apache.org/licenses/LICENSE-2.0
//
//    Unless required by applicable law or agreed to in writing, software
//    distributed under the License is distributed on an "AS IS" BASIS,
//    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//    See the License for the specific language governing permissions and
//    limitations under the License.

#ifndef DIMOD_ADJVECTORBQM_H_
#define DIMOD_ADJVECTORBQM_H_

#include <algorithm>
#include <stdio.h>
#include <utility>
#include <vector>

#include "dimod/utils.h"

namespace dimod {

/**
 * Binary quadratic model stored as an adjacency-vector structure:
 * `adj[v]` holds (neighborhood, linear bias) for variable `v`, where the
 * neighborhood is a vector of (neighbor, quadratic bias) pairs kept sorted
 * by neighbor index (see set_quadratic / remove_interaction, which insert
 * and erase via std::lower_bound with utils::comp_v).
 *
 * @tparam V integral variable-index type.
 * @tparam B bias (weight) type.
 */
template<class V, class B>
class AdjVectorBQM {
 public:
    using bias_type = B;
    using variable_type = V;
    using size_type = std::size_t;

    using outvars_iterator = typename std::vector<std::pair<V, B>>::iterator;
    using const_outvars_iterator = typename std::vector<std::pair<V, B>>::const_iterator;

    // in the future we'd probably like to make this protected
    std::vector<std::pair<std::vector<std::pair<V, B>>, B>> adj;

    /// Construct an empty BQM (no variables, no interactions).
    AdjVectorBQM() {}

    /**
     * Copy-construct from any BQM-like object exposing num_variables(),
     * linear(v), and neighborhood(v) returning an iterator pair.
     */
    template<class BQM>
    explicit AdjVectorBQM(const BQM &bqm) {
        adj.resize(bqm.num_variables());

        for (variable_type v = 0; v < bqm.num_variables(); ++v) {
            linear(v) = bqm.linear(v);

            auto span = bqm.neighborhood(v);
            adj[v].first.insert(adj[v].first.begin(), span.first, span.second);
        }
    }

    /**
     * Construct a BQM from a dense array.
     *
     * @param dense An array containing the biases. Assumed to contain
     *     `num_variables`^2 elements. The upper and lower triangle are summed.
     * @param num_variables The number of variables.
     */
    template<class B2>
    AdjVectorBQM(const B2 dense[], size_type num_variables,
                 bool ignore_diagonal = false) {
        // we know how big our linear is going to be
        adj.resize(num_variables);

        bias_type qbias;

        if (!ignore_diagonal) {
            // diagonal entries carry the linear biases
            for (size_type v = 0; v < num_variables; ++v) {
                adj[v].second = dense[v*(num_variables+1)];
            }
        }

        for (size_type u = 0; u < num_variables; ++u) {
            for (size_type v = u + 1; v < num_variables; ++v) {
                // sum upper and lower triangle into one quadratic bias
                qbias = dense[u*num_variables+v] + dense[v*num_variables+u];

                if (qbias != 0) {
                    // neighbors are appended in increasing index order, so
                    // each neighborhood ends up sorted as required
                    adj[u].first.emplace_back(v, qbias);
                    adj[v].first.emplace_back(u, qbias);
                }
            }
        }
    }

    /**
     * Construct a BQM from a dense array. This constructor is parallelized
     * and temporarily zeroes out the diagonal of the dense array but restores
     * it back.
     *
     * @param dense An array containing the biases. Assumed to contain
     *     `num_variables`^2 elements. The upper and lower triangle are summed.
     * @param num_variables The number of variables.
     */
    // NOTE(review): BLOCK_SIZE is assumed to be a compile-time constant
    // defined by an including header -- confirm its definition site.
    template <class B2>
    AdjVectorBQM(B2 dense[], size_type num_variables,
                 bool ignore_diagonal = false) {
        // we know how big our linear is going to be
        adj.resize(num_variables);

        // Backup copy of the diagonal of the dense matrix.
        std::vector<B2> dense_diagonal(num_variables);

        if (!ignore_diagonal) {
            // NOTE(review): unsigned loop index requires OpenMP >= 3.0
            #pragma omp parallel for
            for (size_type v = 0; v < num_variables; ++v) {
                adj[v].second = dense[v * (num_variables + 1)];
            }
        }

        #pragma omp parallel
        {
            // Zero out the diagonal to avoid expensive checks inside innermost
            // loop in the code for reading the matrix. The diagonal will be
            // restored so a backup copy is saved.
            // (the implicit barrier after this omp-for makes the zeroed
            // diagonal visible to all threads before the block scan below)
            #pragma omp for schedule(static)
            for (size_type v = 0; v < num_variables; ++v) {
                dense_diagonal[v] = dense[v * (num_variables + 1)];
                dense[v * (num_variables + 1)] = 0;
            }

            // per-thread scratch: counters[n] counts neighbors collected so
            // far for row u_st + n of the current block
            size_type counters[BLOCK_SIZE] = {0};
            size_type buffer_size =
                num_variables * BLOCK_SIZE * sizeof(std::pair<variable_type, bias_type>);
            std::pair<variable_type, bias_type> *temp_buffer =
                (std::pair<variable_type, bias_type> *)malloc(buffer_size);

            if (temp_buffer == NULL) {
                // NOTE(review): exit(0) reports success on allocation failure;
                // a nonzero status would be more conventional.
                printf("Memory allocation failure.\n");
                exit(0);
            }

            // We process the matrix in blocks of size BLOCK_SIZE*BLOCK_SIZE to take
            // advantage of cache locality. Dynamic scheduling is used as we know some
            // blocks may be more sparse than others and processing them may finish earlier.
            #pragma omp for schedule(dynamic)
            for (size_type u_st = 0; u_st < num_variables; u_st += BLOCK_SIZE) {
                size_type u_end = std::min(u_st + BLOCK_SIZE, num_variables);
                for (size_type v_st = 0; v_st < num_variables; v_st += BLOCK_SIZE) {
                    size_type v_end = std::min(v_st + BLOCK_SIZE, num_variables);
                    for (size_type u = u_st, n = 0; u < u_end; u++, n++) {
                        size_type counter_u = counters[n];
                        size_type counter_u_old = counter_u;
                        for (size_type v = v_st; v < v_end; v++) {
                            // diagonal is zero here, so u == v contributes nothing
                            bias_type qbias = dense[u * num_variables + v] +
                                              dense[v * num_variables + u];
                            if (qbias != 0) {
                                temp_buffer[n * num_variables + counter_u++] = {v, qbias};
                            }
                        }
                        if (counter_u != counter_u_old) {
                            counters[n] = counter_u;
                        }
                    }
                }

                // flush this block-row's buffered neighbors into adj; rows of
                // a block-row are owned by exactly one thread, so no locking
                for (size_type n = 0; n < BLOCK_SIZE; n++) {
                    if (counters[n]) {
                        adj[u_st + n].first.assign(
                            temp_buffer + n * num_variables,
                            temp_buffer + n * num_variables + counters[n]);
                        counters[n] = 0;
                    }
                }
            }

            free(temp_buffer);

            // Restore the diagonal of the original dense matrix
            #pragma omp for schedule(static)
            for (size_type v = 0; v < num_variables; ++v) {
                dense[v * (num_variables + 1)] = dense_diagonal[v];
            }
        }
    }

    /// Add one (disconnected) variable to the BQM and return its index.
    variable_type add_variable() {
        adj.resize(adj.size()+1);
        return adj.size()-1;
    }

    /// Get the degree of variable `v`.
    size_type degree(variable_type v) const {
        return adj[v].first.size();
    }

    /// Deprecated alias for linear(v).
    [[deprecated("Use AdjVectorBQM::linear(v)")]]
    bias_type get_linear(variable_type v) const { return linear(v); }

    /**
     * Look up the quadratic bias between `u` and `v`.
     *
     * @returns (bias, true) if the interaction exists, (0, false) otherwise.
     */
    std::pair<bias_type, bool>
    get_quadratic(variable_type u, variable_type v) const {
        assert(u >= 0 && u < adj.size());
        assert(v >= 0 && v < adj.size());
        assert(u != v);

        auto span = neighborhood(u);
        // binary search works because each neighborhood is kept sorted
        auto low = std::lower_bound(span.first, span.second, v, utils::comp_v<V, B>);

        if (low == span.second || low->first != v)
            return std::make_pair(0, false);
        return std::make_pair(low->second, true);
    }

    /// Mutable reference to the linear bias of variable `v`.
    bias_type& linear(variable_type v) {
        assert(v >= 0 && v < adj.size());
        return adj[v].second;
    }

    /// Const reference to the linear bias of variable `v`.
    const bias_type& linear(variable_type v) const {
        assert(v >= 0 && v < adj.size());
        return adj[v].second;
    }

    /// Mutable iterator pair over the (neighbor, bias) list of `u`.
    std::pair<outvars_iterator, outvars_iterator>
    neighborhood(variable_type u) {
        assert(u >= 0 && u < adj.size());
        return std::make_pair(adj[u].first.begin(), adj[u].first.end());
    }

    /// Const iterator pair over the (neighbor, bias) list of `u`.
    std::pair<const_outvars_iterator, const_outvars_iterator>
    neighborhood(variable_type u) const {
        assert(u >= 0 && u < adj.size());
        return std::make_pair(adj[u].first.cbegin(), adj[u].first.cend());
    }

    /**
     * The neighborhood of variable `v`.
     *
     * @param A variable `v`.
     * @param The neighborhood will start with the first out variable that
     *     does not compare less than `start`.
     *
     * @returns A pair of iterators pointing to the start and end of the
     *     neighborhood.
     */
    std::pair<const_outvars_iterator, const_outvars_iterator>
    neighborhood(variable_type v, variable_type start) const {
        auto span = neighborhood(v);
        auto low = std::lower_bound(span.first, span.second, start,
                                    utils::comp_v<V, B>);
        return std::make_pair(low, span.second);
    }

    /// Number of variables in the BQM.
    size_type num_variables() const {
        return adj.size();
    }

    /// Number of interactions; each is stored twice (u->v and v->u).
    size_type num_interactions() const {
        size_type count = 0;
        for (auto it = adj.begin(); it != adj.end(); ++it)
            count += it->first.size();
        return count / 2;  // halve because every interaction appears twice
    }

    /**
     * Remove the last variable (and all of its interactions) from the BQM.
     *
     * @returns The new number of variables.
     */
    variable_type pop_variable() {
        assert(adj.size() > 0);

        variable_type v = adj.size() - 1;

        // remove v from all of its neighbor's neighborhoods
        for (auto it = adj[v].first.cbegin(); it != adj[v].first.cend(); ++it) {
            auto span = neighborhood(it->first);
            auto low = std::lower_bound(span.first, span.second, v,
                                        utils::comp_v<V, B>);
            adj[it->first].first.erase(low);
        }

        adj.pop_back();

        return adj.size();
    }

    /**
     * Remove the interaction between `u` and `v`, if present, from both
     * neighborhoods.
     *
     * @returns true if the interaction existed.
     */
    bool remove_interaction(variable_type u, variable_type v) {
        assert(u >= 0 && u < adj.size());
        assert(v >= 0 && v < adj.size());

        auto span = neighborhood(u);
        auto low = std::lower_bound(span.first, span.second, v, utils::comp_v<V, B>);

        bool exists = !(low == span.second || low->first != v);

        if (exists) {
            adj[u].first.erase(low);

            // the mirror entry must exist; erase it from v's neighborhood too
            span = neighborhood(v);
            low = std::lower_bound(span.first, span.second, u, utils::comp_v<V, B>);

            assert(!(low == span.second || low->first != u) == exists);

            adj[v].first.erase(low);
        }

        return exists;
    }

    /// Deprecated alias for `linear(v) = b`.
    [[deprecated("Use AdjVectorBQM::linear(v)")]]
    void set_linear(variable_type v, bias_type b) {
        assert(v >= 0 && v < adj.size());
        linear(v) = b;
    }

    /**
     * Set (insert or overwrite) the quadratic bias between `u` and `v`,
     * updating both mirrored neighborhood entries.
     *
     * @returns Always true (whether the value was set).
     */
    bool set_quadratic(variable_type u, variable_type v, bias_type b) {
        assert(u >= 0 && u < adj.size());
        assert(v >= 0 && v < adj.size());
        assert(u != v);

        auto span = neighborhood(u);
        auto low = std::lower_bound(span.first, span.second, v, utils::comp_v<V, B>);

        bool exists = !(low == span.second || low->first != v);

        if (exists) {
            low->second = b;
        } else {
            // emplace at the lower_bound keeps the neighborhood sorted
            adj[u].first.emplace(low, v, b);
        }

        span = neighborhood(v);
        low = std::lower_bound(span.first, span.second, u, utils::comp_v<V, B>);

        assert(!(low == span.second || low->first != u) == exists);

        if (exists) {
            low->second = b;
        } else {
            adj[v].first.emplace(low, u, b);
        }

        // to be consistent with AdjArrayBQM, we return whether the value was
        // set
        return true;
    }
};
}  // namespace dimod

#endif  // DIMOD_ADJVECTORBQM_H_
convolutiondepthwise_5x5_pack4.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Depthwise 5x5 convolution, stride 1, pack-4 layout, MIPS MSA intrinsics.
// Each channel (group) is convolved independently with its own 5x5 kernel of
// 4-lane float vectors; two output rows are produced per outer iteration so
// each kernel row loaded is reused against two input rows.
static void convdw5x5s1_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        // per-channel bias vector, or zero when no bias is given
        v4f32 _bias0 = bias ? (v4f32)__msa_ld_w(bias + g * 4, 0) : (v4f32)__msa_fill_w(0);

        const float* k0 = kernel.row(g);

        float* outptr0 = out.row(0);
        float* outptr1 = out.row(1);

        const Mat img0 = bottom_blob.channel(g);

        const float* r0 = img0.row(0);
        const float* r1 = img0.row(1);
        const float* r2 = img0.row(2);
        const float* r3 = img0.row(3);
        const float* r4 = img0.row(4);
        const float* r5 = img0.row(5);

        int i = 0;
        // main loop: two output rows at a time (rows r0..r4 feed _sum0,
        // rows r1..r5 feed _sum1 with the same kernel rows)
        for (; i + 1 < outh; i += 2)
        {
            int j = 0;
            for (; j < outw; j++)
            {
                v4f32 _sum0 = _bias0;
                v4f32 _sum1 = _bias0;

                // kernel row 0 against input rows 0 (sum0) and 1 (sum1)
                v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
                v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
                v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
                v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
                v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);

                v4f32 _k00 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k01 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k02 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k03 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k04 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k00, _r00);
                _sum0 = __msa_fmadd_w(_sum0, _k01, _r01);
                _sum0 = __msa_fmadd_w(_sum0, _k02, _r02);
                _sum0 = __msa_fmadd_w(_sum0, _k03, _r03);
                _sum0 = __msa_fmadd_w(_sum0, _k04, _r04);

                v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
                v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
                v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
                v4f32 _r13 = (v4f32)__msa_ld_w(r1 + 4 * 3, 0);
                v4f32 _r14 = (v4f32)__msa_ld_w(r1 + 4 * 4, 0);

                _sum1 = __msa_fmadd_w(_sum1, _k00, _r10);
                _sum1 = __msa_fmadd_w(_sum1, _k01, _r11);
                _sum1 = __msa_fmadd_w(_sum1, _k02, _r12);
                _sum1 = __msa_fmadd_w(_sum1, _k03, _r13);
                _sum1 = __msa_fmadd_w(_sum1, _k04, _r14);

                // kernel row 1 against input rows 1 and 2
                v4f32 _k10 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k11 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k12 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k13 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k14 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k10, _r10);
                _sum0 = __msa_fmadd_w(_sum0, _k11, _r11);
                _sum0 = __msa_fmadd_w(_sum0, _k12, _r12);
                _sum0 = __msa_fmadd_w(_sum0, _k13, _r13);
                _sum0 = __msa_fmadd_w(_sum0, _k14, _r14);

                v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
                v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
                v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0);
                v4f32 _r23 = (v4f32)__msa_ld_w(r2 + 4 * 3, 0);
                v4f32 _r24 = (v4f32)__msa_ld_w(r2 + 4 * 4, 0);

                _sum1 = __msa_fmadd_w(_sum1, _k10, _r20);
                _sum1 = __msa_fmadd_w(_sum1, _k11, _r21);
                _sum1 = __msa_fmadd_w(_sum1, _k12, _r22);
                _sum1 = __msa_fmadd_w(_sum1, _k13, _r23);
                _sum1 = __msa_fmadd_w(_sum1, _k14, _r24);

                // kernel row 2 against input rows 2 and 3
                v4f32 _k20 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k21 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k22 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k23 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k24 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k20, _r20);
                _sum0 = __msa_fmadd_w(_sum0, _k21, _r21);
                _sum0 = __msa_fmadd_w(_sum0, _k22, _r22);
                _sum0 = __msa_fmadd_w(_sum0, _k23, _r23);
                _sum0 = __msa_fmadd_w(_sum0, _k24, _r24);

                v4f32 _r30 = (v4f32)__msa_ld_w(r3, 0);
                v4f32 _r31 = (v4f32)__msa_ld_w(r3 + 4, 0);
                v4f32 _r32 = (v4f32)__msa_ld_w(r3 + 4 * 2, 0);
                v4f32 _r33 = (v4f32)__msa_ld_w(r3 + 4 * 3, 0);
                v4f32 _r34 = (v4f32)__msa_ld_w(r3 + 4 * 4, 0);

                _sum1 = __msa_fmadd_w(_sum1, _k20, _r30);
                _sum1 = __msa_fmadd_w(_sum1, _k21, _r31);
                _sum1 = __msa_fmadd_w(_sum1, _k22, _r32);
                _sum1 = __msa_fmadd_w(_sum1, _k23, _r33);
                _sum1 = __msa_fmadd_w(_sum1, _k24, _r34);

                // kernel row 3 against input rows 3 and 4
                v4f32 _k30 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k31 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k32 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k33 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k34 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k30, _r30);
                _sum0 = __msa_fmadd_w(_sum0, _k31, _r31);
                _sum0 = __msa_fmadd_w(_sum0, _k32, _r32);
                _sum0 = __msa_fmadd_w(_sum0, _k33, _r33);
                _sum0 = __msa_fmadd_w(_sum0, _k34, _r34);

                v4f32 _r40 = (v4f32)__msa_ld_w(r4, 0);
                v4f32 _r41 = (v4f32)__msa_ld_w(r4 + 4, 0);
                v4f32 _r42 = (v4f32)__msa_ld_w(r4 + 4 * 2, 0);
                v4f32 _r43 = (v4f32)__msa_ld_w(r4 + 4 * 3, 0);
                v4f32 _r44 = (v4f32)__msa_ld_w(r4 + 4 * 4, 0);

                _sum1 = __msa_fmadd_w(_sum1, _k30, _r40);
                _sum1 = __msa_fmadd_w(_sum1, _k31, _r41);
                _sum1 = __msa_fmadd_w(_sum1, _k32, _r42);
                _sum1 = __msa_fmadd_w(_sum1, _k33, _r43);
                _sum1 = __msa_fmadd_w(_sum1, _k34, _r44);

                // kernel row 4 against input rows 4 and 5;
                // k0 -= 4 * 20 rewinds the kernel pointer to row 0
                v4f32 _k40 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k41 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k42 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k43 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k44 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 -= 4 * 20;

                _sum0 = __msa_fmadd_w(_sum0, _k40, _r40);
                _sum0 = __msa_fmadd_w(_sum0, _k41, _r41);
                _sum0 = __msa_fmadd_w(_sum0, _k42, _r42);
                _sum0 = __msa_fmadd_w(_sum0, _k43, _r43);
                _sum0 = __msa_fmadd_w(_sum0, _k44, _r44);

                v4f32 _r50 = (v4f32)__msa_ld_w(r5, 0);
                v4f32 _r51 = (v4f32)__msa_ld_w(r5 + 4, 0);
                v4f32 _r52 = (v4f32)__msa_ld_w(r5 + 4 * 2, 0);
                v4f32 _r53 = (v4f32)__msa_ld_w(r5 + 4 * 3, 0);
                v4f32 _r54 = (v4f32)__msa_ld_w(r5 + 4 * 4, 0);

                _sum1 = __msa_fmadd_w(_sum1, _k40, _r50);
                _sum1 = __msa_fmadd_w(_sum1, _k41, _r51);
                _sum1 = __msa_fmadd_w(_sum1, _k42, _r52);
                _sum1 = __msa_fmadd_w(_sum1, _k43, _r53);
                _sum1 = __msa_fmadd_w(_sum1, _k44, _r54);

                __msa_st_w((v4i32)_sum0, outptr0, 0);
                __msa_st_w((v4i32)_sum1, outptr1, 0);

                outptr0 += 4;
                outptr1 += 4;

                r0 += 4;
                r1 += 4;
                r2 += 4;
                r3 += 4;
                r4 += 4;
                r5 += 4;
            }

            // skip the 4-pixel right border plus one full extra row
            // (two output rows were produced, so advance two input rows)
            r0 += 4 * 4 + w * 4;
            r1 += 4 * 4 + w * 4;
            r2 += 4 * 4 + w * 4;
            r3 += 4 * 4 + w * 4;
            r4 += 4 * 4 + w * 4;
            r5 += 4 * 4 + w * 4;

            outptr0 += outw * 4;
            outptr1 += outw * 4;
        }
        // tail: remaining single output row
        for (; i < outh; i++)
        {
            int j = 0;
            for (; j < outw; j++)
            {
                v4f32 _sum0 = _bias0;

                v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
                v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
                v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
                v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
                v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);

                v4f32 _k00 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k01 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k02 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k03 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k04 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k00, _r00);
                _sum0 = __msa_fmadd_w(_sum0, _k01, _r01);
                _sum0 = __msa_fmadd_w(_sum0, _k02, _r02);
                _sum0 = __msa_fmadd_w(_sum0, _k03, _r03);
                _sum0 = __msa_fmadd_w(_sum0, _k04, _r04);

                v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
                v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
                v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
                v4f32 _r13 = (v4f32)__msa_ld_w(r1 + 4 * 3, 0);
                v4f32 _r14 = (v4f32)__msa_ld_w(r1 + 4 * 4, 0);

                v4f32 _k10 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k11 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k12 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k13 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k14 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k10, _r10);
                _sum0 = __msa_fmadd_w(_sum0, _k11, _r11);
                _sum0 = __msa_fmadd_w(_sum0, _k12, _r12);
                _sum0 = __msa_fmadd_w(_sum0, _k13, _r13);
                _sum0 = __msa_fmadd_w(_sum0, _k14, _r14);

                v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
                v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
                v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0);
                v4f32 _r23 = (v4f32)__msa_ld_w(r2 + 4 * 3, 0);
                v4f32 _r24 = (v4f32)__msa_ld_w(r2 + 4 * 4, 0);

                v4f32 _k20 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k21 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k22 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k23 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k24 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k20, _r20);
                _sum0 = __msa_fmadd_w(_sum0, _k21, _r21);
                _sum0 = __msa_fmadd_w(_sum0, _k22, _r22);
                _sum0 = __msa_fmadd_w(_sum0, _k23, _r23);
                _sum0 = __msa_fmadd_w(_sum0, _k24, _r24);

                v4f32 _r30 = (v4f32)__msa_ld_w(r3, 0);
                v4f32 _r31 = (v4f32)__msa_ld_w(r3 + 4, 0);
                v4f32 _r32 = (v4f32)__msa_ld_w(r3 + 4 * 2, 0);
                v4f32 _r33 = (v4f32)__msa_ld_w(r3 + 4 * 3, 0);
                v4f32 _r34 = (v4f32)__msa_ld_w(r3 + 4 * 4, 0);

                v4f32 _k30 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k31 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k32 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k33 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k34 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k30, _r30);
                _sum0 = __msa_fmadd_w(_sum0, _k31, _r31);
                _sum0 = __msa_fmadd_w(_sum0, _k32, _r32);
                _sum0 = __msa_fmadd_w(_sum0, _k33, _r33);
                _sum0 = __msa_fmadd_w(_sum0, _k34, _r34);

                v4f32 _r40 = (v4f32)__msa_ld_w(r4, 0);
                v4f32 _r41 = (v4f32)__msa_ld_w(r4 + 4, 0);
                v4f32 _r42 = (v4f32)__msa_ld_w(r4 + 4 * 2, 0);
                v4f32 _r43 = (v4f32)__msa_ld_w(r4 + 4 * 3, 0);
                v4f32 _r44 = (v4f32)__msa_ld_w(r4 + 4 * 4, 0);

                v4f32 _k40 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k41 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k42 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k43 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k44 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                // rewind kernel pointer to row 0 for the next output pixel
                k0 -= 4 * 20;

                _sum0 = __msa_fmadd_w(_sum0, _k40, _r40);
                _sum0 = __msa_fmadd_w(_sum0, _k41, _r41);
                _sum0 = __msa_fmadd_w(_sum0, _k42, _r42);
                _sum0 = __msa_fmadd_w(_sum0, _k43, _r43);
                _sum0 = __msa_fmadd_w(_sum0, _k44, _r44);

                __msa_st_w((v4i32)_sum0, outptr0, 0);

                outptr0 += 4;

                r0 += 4;
                r1 += 4;
                r2 += 4;
                r3 += 4;
                r4 += 4;
            }

            // skip the 4-pixel right border to reach the next input row
            r0 += 4 * 4;
            r1 += 4 * 4;
            r2 += 4 * 4;
            r3 += 4 * 4;
            r4 += 4 * 4;
        }
    }
}

// Depthwise 5x5 convolution, stride 2, pack-4 layout, MIPS MSA intrinsics.
// One output row per iteration; input pointers advance two pixels per output
// pixel and `tailstep` skips the remainder of the current row plus one full
// input row (stride 2 consumes two rows per output row).
static void convdw5x5s2_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    const int tailstep = (w - 2 * outw + w) * 4;

    const float* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        // per-channel bias vector, or zero when no bias is given
        v4f32 _bias0 = bias ? (v4f32)__msa_ld_w(bias + g * 4, 0) : (v4f32)__msa_fill_w(0);

        const float* k0 = kernel.row(g);

        float* outptr0 = out;

        const Mat img0 = bottom_blob.channel(g);

        const float* r0 = img0.row(0);
        const float* r1 = img0.row(1);
        const float* r2 = img0.row(2);
        const float* r3 = img0.row(3);
        const float* r4 = img0.row(4);

        int i = 0;
        for (; i < outh; i++)
        {
            int j = 0;
            for (; j < outw; j++)
            {
                v4f32 _sum0 = _bias0;

                v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
                v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
                v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
                v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
                v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);

                v4f32 _k00 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k01 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k02 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k03 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k04 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k00, _r00);
                _sum0 = __msa_fmadd_w(_sum0, _k01, _r01);
                _sum0 = __msa_fmadd_w(_sum0, _k02, _r02);
                _sum0 = __msa_fmadd_w(_sum0, _k03, _r03);
                _sum0 = __msa_fmadd_w(_sum0, _k04, _r04);

                v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
                v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
                v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
                v4f32 _r13 = (v4f32)__msa_ld_w(r1 + 4 * 3, 0);
                v4f32 _r14 = (v4f32)__msa_ld_w(r1 + 4 * 4, 0);

                v4f32 _k10 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k11 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k12 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k13 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k14 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k10, _r10);
                _sum0 = __msa_fmadd_w(_sum0, _k11, _r11);
                _sum0 = __msa_fmadd_w(_sum0, _k12, _r12);
                _sum0 = __msa_fmadd_w(_sum0, _k13, _r13);
                _sum0 = __msa_fmadd_w(_sum0, _k14, _r14);

                v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
                v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
                v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0);
                v4f32 _r23 = (v4f32)__msa_ld_w(r2 + 4 * 3, 0);
                v4f32 _r24 = (v4f32)__msa_ld_w(r2 + 4 * 4, 0);

                v4f32 _k20 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k21 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k22 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k23 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k24 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k20, _r20);
                _sum0 = __msa_fmadd_w(_sum0, _k21, _r21);
                _sum0 = __msa_fmadd_w(_sum0, _k22, _r22);
                _sum0 = __msa_fmadd_w(_sum0, _k23, _r23);
                _sum0 = __msa_fmadd_w(_sum0, _k24, _r24);

                v4f32 _r30 = (v4f32)__msa_ld_w(r3, 0);
                v4f32 _r31 = (v4f32)__msa_ld_w(r3 + 4, 0);
                v4f32 _r32 = (v4f32)__msa_ld_w(r3 + 4 * 2, 0);
                v4f32 _r33 = (v4f32)__msa_ld_w(r3 + 4 * 3, 0);
                v4f32 _r34 = (v4f32)__msa_ld_w(r3 + 4 * 4, 0);

                v4f32 _k30 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k31 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k32 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k33 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k34 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                k0 += 4 * 5;

                _sum0 = __msa_fmadd_w(_sum0, _k30, _r30);
                _sum0 = __msa_fmadd_w(_sum0, _k31, _r31);
                _sum0 = __msa_fmadd_w(_sum0, _k32, _r32);
                _sum0 = __msa_fmadd_w(_sum0, _k33, _r33);
                _sum0 = __msa_fmadd_w(_sum0, _k34, _r34);

                v4f32 _r40 = (v4f32)__msa_ld_w(r4, 0);
                v4f32 _r41 = (v4f32)__msa_ld_w(r4 + 4, 0);
                v4f32 _r42 = (v4f32)__msa_ld_w(r4 + 4 * 2, 0);
                v4f32 _r43 = (v4f32)__msa_ld_w(r4 + 4 * 3, 0);
                v4f32 _r44 = (v4f32)__msa_ld_w(r4 + 4 * 4, 0);

                v4f32 _k40 = (v4f32)__msa_ld_w(k0, 0);
                v4f32 _k41 = (v4f32)__msa_ld_w(k0 + 4, 0);
                v4f32 _k42 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
                v4f32 _k43 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
                v4f32 _k44 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);

                // rewind kernel pointer to row 0 for the next output pixel
                k0 -= 4 * 20;

                _sum0 = __msa_fmadd_w(_sum0, _k40, _r40);
                _sum0 = __msa_fmadd_w(_sum0, _k41, _r41);
                _sum0 = __msa_fmadd_w(_sum0, _k42, _r42);
                _sum0 = __msa_fmadd_w(_sum0, _k43, _r43);
                _sum0 = __msa_fmadd_w(_sum0, _k44, _r44);

                __msa_st_w((v4i32)_sum0, outptr0, 0);

                outptr0 += 4;

                r0 += 4 * 2;
                r1 += 4 * 2;
                r2 += 4 * 2;
                r3 += 4 * 2;
                r4 += 4 * 2;
            }

            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
            r3 += tailstep;
            r4 += tailstep;
        }
    }
}
fasta2fastq.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <getopt.h> #include <assert.h> #include <time.h> #include "file_buffer.h" #include "fasta2fastq.h" #include "lineindex_lib.h" #include <ctype.h> #include <omp.h> runtime_options options; typedef struct { size_t size; size_t used; char * base; } thread_buffer; thread_buffer * thread_buffers; void to_ascii_33(char * s) { char * prev=s; char * current=s; int index=0; while (isspace(*current) && *current!='\0') { current++; }; while (*current!='\0') { if (*current==' ') { *current='\0'; int temp=atoi(prev); s[index++]=(char)(34+(char)temp); current++; prev=current; while (isspace(*current) && *current!='\0') { current++; }; } else { current++; } } //last one if (*prev!='\0') { int temp=atoi(prev); s[index++]=(char)(34+(char)temp); } s[index]='\0'; return; } static void inline update_last_line(lineindex_table * li, file_buffer * fb, int lines_processed ) { //size_t lines_processed=li->end-li->start; char * qual_last_line=li->table[(li->start+lines_processed-1)%li->size]; //fprintf(stderr,"last line |%s|\n",qual_last_line); //fprintf(stderr,"last %lu\n",qual_last_line+strlen(qual_last_line)-fb->base); size_t qual_last_line_mod=qual_last_line-fb->base; size_t qual_start_mod=fb->unseen_start%fb->size; if (qual_last_line_mod >= qual_start_mod) { fb->unseen_start+=qual_last_line_mod-qual_start_mod; } else { fb->unseen_start+=fb->size-(qual_start_mod-qual_last_line_mod); } while(fb->base[fb->unseen_start%fb->size]!='\0' && fb->unseen_start<fb->unseen_end) { fb->unseen_start++; } //while(fb->base[fb->unseen_start%fb->size]=='\0' && fb->unseen_start<fb->unseen_end) { // fb->unseen_start++; //} return; } void usage(char * s) { fprintf(stderr, "usage: %s [options/parameters] <fasta> <qual>\n", s); fprintf(stderr, " <fasta> The fasta filename to read in\n"); fprintf(stderr, " <qual> The qual file corresponding to the fasta file\n"); fprintf(stderr, "Required:\n"); fprintf(stderr, " --qv-offset The ASCII offset for 
the integer values in the qual file\n"); fprintf(stderr, "Parameters: (all sizes are in bytes unless specified)\n"); fprintf(stderr, " --buffer-size File buffer size in memory per file (Default: %d)\n",DEF_BUFFER_SIZE); fprintf(stderr, " --read-size Read size, read into buffer with this (Default: %d)\n",DEF_READ_SIZE); fprintf(stderr,"\nOptions:\n"); fprintf(stderr, " --help This usage screen\n"); exit(1); } struct option long_op[] = { {"help", 0, 0, 5}, {"buffer-size", 1, 0, 6}, {"read-size", 1, 0, 7}, {"qv-offset",1,0,8}, {0,0,0,0} }; static inline bool fill_fb(file_buffer * fb) { time_t io_start_time=time(NULL); fprintf(stderr,"IO start ... "); bool has_changed=false; while (!fb->exhausted) { fill_read_buffer(&fb->frb); add_read_buffer_to_main(fb); if (!fb->exhausted && !fb->changed && fb->frb.eof==0) { fprintf(stderr,"too small buffer!\n"); exit(1); } has_changed=has_changed || fb->changed; } fprintf(stderr,"IO end ... %lu seconds\n",(time(NULL)-io_start_time)); return has_changed; //fprintf(stdout,"Filled %lu to %lu of %lu |%s|\n",fb->unseen_start, fb->unseen_end, fb->size,fb->base); } static void inline fill_fb_and_index(lineindex_table * li, lineindex_table ** thread_lineindexes, file_buffer * fb) { size_t old_em=fb->unseen_end%fb->size; if (fill_fb(fb)) { size_t newly_added; size_t em=fb->unseen_end%fb->size; if (em > old_em) { newly_added=add_lineindex_from_memory_threaded(li, thread_lineindexes,fb->base+old_em, em-old_em,options.threads, '#'); } else { newly_added=add_lineindex_from_memory_threaded(li, thread_lineindexes,fb->base+old_em, fb->size-old_em,options.threads, '#'); newly_added+=add_lineindex_from_memory_threaded(li, thread_lineindexes,fb->base, em,options.threads, '#'); } if (newly_added>0) { fb->exhausted=false; } } return; } static size_t inline string_to_byte_size(char * s) { char * x=s; while (isdigit(x[0])) {x++;}; size_t multiplier=1; if (*x=='K') { multiplier=1024; } else if (*x=='M') { multiplier=1024*1024; } else if (*x=='G') { 
multiplier=1024*1024*1024; } char old_x=*x; *x='\0'; int ret=atoi(s); *x=old_x; if (ret<=0) { return 0; } return ret*multiplier; } int main (int argc, char ** argv) { options.buffer_size=DEF_BUFFER_SIZE; options.read_size=DEF_READ_SIZE; options.threads=1; options.qv_offset=DEF_QV_OFFSET; int op_id; char short_op[] = "N:"; char c = getopt_long(argc, argv, short_op, long_op, &op_id); while (c != EOF) { switch (c) { case 6: options.buffer_size=string_to_byte_size(optarg); break; case 7: options.read_size=string_to_byte_size(optarg); break; case 5: usage(argv[0]); break; case 8: options.qv_offset=atoi(optarg); break; case 'N': options.threads=atoi(optarg); break; default: fprintf(stderr,"%d : %c , %d is not an option!\n",c,(char)c,op_id); usage(argv[0]); break; } c = getopt_long(argc, argv, short_op, long_op, &op_id); } if (options.qv_offset<=0) { fprintf(stderr,"Please specify a qv_offset. This is used when converting qual files into fastq format.\nFor SOLiD data this value will be most likely 34.\nFor Illumina data this value will be most likely 64, except for Illumina 1.8+ when it is 33.\n"); usage(argv[0]); exit(1); } fprintf(stderr,"Set to %d threads!\n",options.threads); if (argc<=optind+1) { fprintf(stderr,"Please specify reads file and at least one sam file!\n"); usage(argv[0]); } argc-=optind; argv+=optind; if (argc!=2) { fprintf(stderr,"Please specify both a fasta and qual file!\n"); usage(argv[0]); } //Variables for IO of read names char * fasta_filename=argv[0]; char * qual_filename=argv[1]; fprintf(stderr,"Using %s as fasta reads filename and %s as qual filename\n",fasta_filename,qual_filename); argc-=2; argv+=2; lineindex_table * qual_thread_lineindexes[options.threads]; int i; for (i=0; i<options.threads; i++) { qual_thread_lineindexes[i]=lineindex_init(1); } //master table lineindex_table * qual_li = lineindex_init(1); lineindex_table * fasta_thread_lineindexes[options.threads]; for (i=0; i<options.threads; i++) { 
fasta_thread_lineindexes[i]=lineindex_init(1); } //master table lineindex_table * fasta_li = lineindex_init(1); //set up the thread_buffers thread_buffers=(thread_buffer*)malloc(sizeof(thread_buffer)*options.threads); if (thread_buffers==NULL) { fprintf(stderr,"Failed to malloc memory for thread buffers!\n"); exit(1); } for (i=0; i<options.threads; i++ ) { thread_buffers[i].size=(options.buffer_size/options.threads+1000)*1.3; thread_buffers[i].base=(char*)malloc(sizeof(char)*thread_buffers[i].size); if (thread_buffers[i].base==NULL) { fprintf(stderr,"Failed to allocate memory for thread buffers!\n"); exit(1); } } //get the hit list, process it, do it again! fprintf(stderr,"Setting up buffer with size %lu and read_size %lu\n",options.buffer_size,options.read_size); file_buffer * qual_fb = fb_open(qual_filename,options.buffer_size,options.read_size); file_buffer * fasta_fb = fb_open(fasta_filename,options.buffer_size,options.read_size); size_t lines_processed=0; clock_t start_time=clock(); clock_t last_time=clock(); size_t iterations=0; bool first_loop=true; while (lines_processed!=0 || first_loop) { first_loop=false; //index lines fill_fb_and_index(qual_li, qual_thread_lineindexes,qual_fb); fill_fb_and_index(fasta_li, fasta_thread_lineindexes,fasta_fb); lines_processed=qual_li->end-qual_li->start; //print lines //figure out which thread handles which int lines_to_print[options.threads]; int start[options.threads]; for (i=0; i<options.threads; i++) { start[i]=(i==0 ? 0 : start[i-1]+lines_to_print[i-1]); lines_to_print[i]=lines_processed/options.threads+(lines_processed%options.threads > i ? 
1 : 0); //fprintf(stderr,"Thread %d , start %d , lines %d\n",i,start[i],lines_to_print[i]); } #pragma omp parallel { int thread_id = omp_get_thread_num(); thread_buffer * ob = thread_buffers+thread_id; ob->used=0; ob->base[0]='\0'; int i; for (i=start[thread_id]; i<start[thread_id]+lines_to_print[thread_id]; i++) { //fprintf(stderr,"Running %d on %d, %d\n",thread_id,i,(fasta_li->start+i)%fasta_li->size); //fprintf(stdout,"@%s\n",fasta_li->table[(fasta_li->start+i)%fasta_li->size]+1); char * to_print=fasta_li->table[(fasta_li->start+i)%fasta_li->size]; char * qual_string=qual_li->table[(qual_li->start+i)%qual_li->size]; //fprintf(stderr,"F |%s| vs |%s| \n",to_print,qual_string); while (strlen(to_print)+strlen(qual_string)+ob->used>ob->size) { ob->size*=1.3; ob->base=(char*)realloc(ob->base,sizeof(char)*ob->size); if (ob->base==NULL) { fprintf(stderr,"Failed to allocate memory for thread_buffer expand\n"); exit(1); } } //qual string moves between read names and quals, check if we are printing a read name or qual if (qual_string[0]=='>') { ob->used+=sprintf(ob->base+ob->used,"@%s\n",fasta_li->table[(fasta_li->start+i)%fasta_li->size]+1); } else { to_ascii_33(qual_li->table[(qual_li->start+i)%qual_li->size]); ob->used+=sprintf(ob->base+ob->used,"%s\n",fasta_li->table[(fasta_li->start+i)%fasta_li->size]); ob->used+=sprintf(ob->base+ob->used,"+\n%s\n",qual_li->table[(qual_li->start+i)%qual_li->size]); } //fprintf(stderr,"%s and %s\n",to_print,qual_string); //assert(qual_string[0]!='>'); } } //print the blocks for (i=0; i<options.threads; i++) { fprintf(stdout,"%s",thread_buffers[i].base); } update_last_line(qual_li,qual_fb,lines_processed); update_last_line(fasta_li,fasta_fb,lines_processed); //fprintf(stderr,"%lu %lu\n",qual_fb->unseen_start,qual_fb->unseen_end); //fprintf(stderr,"END OF IT |%s|\n",qual_fb->base+qual_fb->unseen_start%qual_fb->size); qual_li->start+=lines_processed; fasta_li->start+=lines_processed; if (lines_processed>0) { qual_fb->exhausted=false; 
fasta_fb->exhausted=false; } iterations++; if ( (clock()-last_time)/options.threads > CLOCKS_PER_SEC/4) { double lines_per_second=qual_li->start/( (double)(clock()-start_time)/(CLOCKS_PER_SEC*options.threads)); double lines_per_iteration=qual_li->start/(double)iterations; fprintf(stderr,"Processing overall at %lf reads / second, %lf reads / iteration, processed %lu, lines on this iteration %lu\n",lines_per_second,lines_per_iteration,qual_li->start,lines_processed); last_time=clock(); } } //free the line-indexes for (i=0; i<options.threads; i++) { lineindex_destroy(qual_thread_lineindexes[i]); lineindex_destroy(fasta_thread_lineindexes[i]); } //free the master index lineindex_destroy(qual_li); lineindex_destroy(fasta_li); //close the file_buffers fb_close(qual_fb); fb_close(fasta_fb); return 0; }
poisson.c
# include <stdlib.h> # include <stdio.h> # include <math.h> # include <time.h> # include <string.h> #include <assert.h> #include <unistd.h> #include <sys/time.h> #if defined(_OPENMP) # include <omp.h> #endif # include "poisson.h" # include "main.h" #include "../../common/Utils.h" double r8mat_rms(int nx, int ny, double *a_); void rhs(int nx, int ny, double *f_, int block_size); void timestamp(void); double u_exact(double x, double y); double uxxyy_exact(double x, double y); /* Purpose: MAIN is the main program for POISSON_OPENMP. Discussion: POISSON_OPENMP is a program for solving the Poisson problem. This program uses OpenMP for parallel execution. The Poisson equation - DEL^2 U(X,Y) = F(X,Y) is solved on the unit square [0,1] x [0,1] using a grid of NX by NX evenly spaced points. The first and last points in each direction are boundary points. The boundary conditions and F are set so that the exact solution is U(x,y) = sin ( pi * x * y) so that - DEL^2 U(x,y) = pi^2 * ( x^2 + y^2) * sin ( pi * x * y) The Jacobi iteration is repeatedly applied until convergence is detected. For convenience in writing the discretized equations, we assume that NX = NY. Licensing: This code is distributed under the GNU LGPL license. 
Modified: 14 December 2011 Author: John Burkardt */ /******************************************************************************/ double run(struct user_parameters* params) { int matrix_size = params->matrix_size; if (matrix_size <= 0) { matrix_size = 512; params->matrix_size = matrix_size; } int block_size = params->blocksize; if (block_size <= 0) { block_size = 128; params->blocksize = block_size; } int niter = params->titer; if (niter <= 0) { niter = 4; params->titer = niter; } int type = params->type; if (type <= 0) { type =1; params->type = type; } double dx; double dy; double error; int ii,i; int jj,j; int nx = matrix_size; int ny = matrix_size; double *f_ = malloc(nx * nx * sizeof(double)); double (*f)[nx][ny] = (double (*)[nx][ny])f_; double *u_ = malloc(nx * nx * sizeof(double)); double *unew_ = malloc(nx * ny * sizeof(double)); double (*unew)[nx][ny] = (double (*)[nx][ny])unew_; /* test if valid */ if ( (nx % block_size) || (ny % block_size) ) { params->succeed = 0; params->string2display = "*****ERROR: blocsize must divide NX and NY"; return 0; } /// INITIALISATION dx = 1.0 / (double) (nx - 1); dy = 1.0 / (double) (ny - 1); // Set the right hand side array F. rhs(nx, ny, f_, block_size); /* Set the initial solution estimate UNEW. We are "allowed" to pick up the boundary conditions exactly. 
*/ #pragma omp parallel #pragma omp master //for collapse(2) for (j = 0; j < ny; j+= block_size) for (i = 0; i < nx; i+= block_size) #pragma omp task firstprivate(i,j) private(ii,jj) for (jj=j; jj<j+block_size; ++jj) for (ii=i; ii<i+block_size; ++ii) { if (ii == 0 || ii == nx - 1 || jj == 0 || jj == ny - 1) { (*unew)[ii][jj] = (*f)[ii][jj]; } else { (*unew)[ii][jj] = 0.0; } } /// KERNEL INTENSIVE COMPUTATION //START_TIMER; double t_start, t_end; t_start = rtclock(); if (type == 1){ sweep_task(nx, ny, dx, dy, f_, 0, niter, u_, unew_, block_size); } else if (type == 2) { sweep_task_dep(nx, ny, dx, dy, f_, 0, niter, u_, unew_, block_size); } else if (type == 3){ sweep_block_for(nx, ny, dx, dy, f_, 0, niter, u_, unew_, block_size); } else if (type == 4){ sweep_block_task(nx, ny, dx, dy, f_, 0, niter, u_, unew_, block_size); } else if (type == 5){ sweep_block_task_dep(nx, ny, dx, dy, f_, 0, niter, u_, unew_, block_size); } else if (type == 6){ sweep_seq(nx, ny, dx, dy, f_, 0, niter, u_, unew_); } if ( type > 6 ) { params->succeed = 0; params->string2display = "*****ERROR: type not known"; return 0; } //END_TIMER; t_end = rtclock(); /* modificar para checar o final do main*/ #ifdef _OPENMP if(params->check) { double x; double y; double *udiff_ = malloc(nx * ny * sizeof(double)); double (*udiff)[nx][ny] = (double (*)[nx][ny])udiff_; /// CHECK OUTPUT // Check for convergence. for (j = 0; j < ny; j++) { y = (double) (j) / (double) (ny - 1); for (i = 0; i < nx; i++) { x = (double) (i) / (double) (nx - 1); (*udiff)[i][j] = (*unew)[i][j] - u_exact(x, y); } } error = r8mat_rms(nx, ny, udiff_); double error1; // Set the right hand side array F. rhs(nx, ny, f_, block_size); /* Set the initial solution estimate UNEW. We are "allowed" to pick up the boundary conditions exactly. 
*/ for (j = 0; j < ny; j++) { for (i = 0; i < nx; i++) { if (i == 0 || i == nx - 1 || j == 0 || j == ny - 1) { (*unew)[i][j] = (*f)[i][j]; } else { (*unew)[i][j] = 0.0; } } } sweep_seq(nx, ny, dx, dy, f_, 0, niter, u_, unew_); // Check for convergence. for (j = 0; j < ny; j++) { y = (double) (j) / (double) (ny - 1); for (i = 0; i < nx; i++) { x = (double) (i) / (double) (nx - 1); (*udiff)[i][j] = (*unew)[i][j] - u_exact(x, y); } } error1 = r8mat_rms(nx, ny, udiff_); params->succeed = fabs(error - error1) < 1.0E-6; free(udiff_); } #else params->succeed = 1; (void)error; #endif free(f_); free(u_); free(unew_); return (t_end - t_start); } /* R8MAT_RMS returns the RMS norm of a vector stored as a matrix. */ double r8mat_rms(int nx, int ny, double *a_) { double (*a)[nx][ny] = (double (*)[nx][ny])a_; int i; int j; double v; v = 0.0; for (j = 0; j < ny; j++) { for (i = 0; i < nx; i++) { v = v + (*a)[i][j] * (*a)[i][j]; } } v = sqrt(v / (double) (nx * ny)); return v; } /* RHS initializes the right hand side "vector". */ void rhs(int nx, int ny, double *f_, int block_size) { double (*f)[nx][ny] = (double (*)[nx][ny])f_; int i,ii; int j,jj; double x; double y; // The "boundary" entries of F store the boundary values of the solution. // The "interior" entries of F store the right hand sides of the Poisson equation. #pragma omp parallel #pragma omp master //for collapse(2) for (j = 0; j < ny; j+=block_size) for (i = 0; i < nx; i+=block_size) #pragma omp task firstprivate(block_size,i,j,nx,ny) private(ii,jj,x,y) for (jj=j; jj<j+block_size; ++jj) { y = (double) (jj) / (double) (ny - 1); for (ii=i; ii<i+block_size; ++ii) { x = (double) (ii) / (double) (nx - 1); if (ii == 0 || ii == nx - 1 || jj == 0 || jj == ny - 1) (*f)[ii][jj] = u_exact(x, y); else (*f)[ii][jj] = - uxxyy_exact(x, y); } } } /* Evaluates the exact solution. 
*/ double u_exact(double x, double y) { double pi = 3.141592653589793; double value; value = sin(pi * x * y); return value; } /* Evaluates (d/dx d/dx + d/dy d/dy) of the exact solution. */ double uxxyy_exact(double x, double y) { double pi = 3.141592653589793; double value; value = - pi * pi * (x * x + y * y) * sin(pi * x * y); return value; }
GB_unaryop__abs_fp64_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_fp64_uint32
// op(A') function:  GB_tran__abs_fp64_uint32

// C type:   double
// A type:   uint32_t
// cast:     double cij = (double) aij
// unaryop:  cij = fabs (aij)

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = fabs (x) ;

// casting
#define GB_CASTING(z, x) \
    double z = (double) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_FP64 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__abs_fp64_uint32
(
    double *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // one flat, statically scheduled parallel loop over all anz entries
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__abs_fp64_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose kernel body is textually included; phase 2 fills C
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ompfuncs.c
#include <stdlib.h> #include <stdio.h> #include <math.h> #include <time.h> #include <string.h> #include "ompfuncs.h" #define swapbytes(TYPE, i, j, n) { \ register TYPE *a=(TYPE *)i; \ register TYPE *b=(TYPE *)j; \ long k=n; \ do { \ register TYPE t=*a; \ *a++=*b; \ *b++=t; \ } while(--k>0); \ } static __inline void swap(void* to, void* src, size_t element_size) { //if (to!=src) { register char temp; register char *a=to; register char *b=src; int i; for (i=0; i<element_size; i++) { temp=b[i]; b[i]=a[i]; a[i]=temp; } } } static __inline void bytecopy(void* to, void* src, size_t element_size) { if (to!=src) { register char *a=to; register char *b=src; int i; for (i=0; i<element_size; i++) { a[i]=b[i]; } } } static __inline char * med3(a, b, c, cmp) char *a, *b, *c; int (*cmp)(const void *, const void *); { return cmp(a, b) < 0 ? (cmp(b, c) < 0 ? b : (cmp(a, c) < 0 ? c : a )) :(cmp(b, c) > 0 ? b : (cmp(a, c) < 0 ? a : c )); } static __inline int partition(void* data, size_t num_elements, size_t element_size, int (*comparer)(const void *, const void *)) { int store=0, i; char* base=data; int pivotIndex=0; int d; // char *pm,*pl,*pn; // pm = (char *)data + (num_elements / 2) * element_size; // if (num_elements > 7) { // pl = data; // pn = (char *)data + (num_elements - 1) * element_size; // if (num_elements > 40) { // d = (num_elements / 8) * element_size; // pl = med3(pl, pl + d, pl + 2 * d, comparer); // pm = med3(pm - d, pm, pm + d, comparer); // pn = med3(pn - 2 * d, pn - d, pn, comparer); // } // pm = med3(pl, pm, pn, comparer); // } if (comparer(&base[0], &base[element_size*(num_elements-1)])==1) { if (comparer(&base[0], &base[element_size*((int)((num_elements-1)/2))])!=1) pivotIndex=0; else if (comparer(&base[element_size*(num_elements-1)], &base[element_size*((int)((num_elements-1)/2))])==1) pivotIndex=num_elements-1; else pivotIndex=(int)((num_elements-1)/2); } else { if (comparer(&base[0], &base[element_size*((int)(num_elements-1)/2)])==1) pivotIndex=0; else if 
(comparer(&base[element_size*(num_elements-1)], &base[element_size*((int)((num_elements-1)/2))])!=1) pivotIndex=num_elements-1; else pivotIndex=(int)((num_elements-1)/2); } swapbytes(char , &base[element_size*(pivotIndex)], &base[element_size*(num_elements-1)], element_size); for (i=0; i<(num_elements-1); i++) { if (comparer(&base[element_size*i], &base[element_size*(num_elements-1)])!=1) { if (i!=store) swapbytes(char ,&base[element_size*i], &base[element_size*store], element_size); store++; } } swapbytes(char, &base[element_size*store], &base[element_size*(num_elements-1)], element_size); return store; } int partarray(void* data, size_t num_elements, size_t element_size, int (*comparer)(const void *, const void *), void **min, void **max) { int store=0, i; char* base=data; int pivotIndex=0; int d; char *pm,*pl,*pn; pm = (char *)data + (num_elements / 2) * element_size; if (num_elements > 7) { pl = data; pn = (char *)data + (num_elements - 1) * element_size; if (num_elements > 40) { d = (num_elements / 8) * element_size; pl = med3(pl, pl + d, pl + 2 * d, comparer); pm = med3(pm - d, pm, pm + d, comparer); pn = med3(pn - 2 * d, pn - d, pn, comparer); } pm = med3(pl, pm, pn, comparer); } // if (comparer(&base[0], &base[element_size*(num_elements-1)])==1) // { // if (comparer(&base[0], &base[element_size*((int)((num_elements-1)/2))])!=1) // pivotIndex=0; // else if (comparer(&base[element_size*(num_elements-1)], &base[element_size*((int)((num_elements-1)/2))])==1) // pivotIndex=num_elements-1; // else pivotIndex=(int)((num_elements-1)/2); // } else // { // if (comparer(&base[0], &base[element_size*((int)(num_elements-1)/2)])==1) // pivotIndex=0; // else if (comparer(&base[element_size*(num_elements-1)], &base[element_size*((int)((num_elements-1)/2))])!=1) // pivotIndex=num_elements-1; // else pivotIndex=(int)((num_elements-1)/2); // } // // swapbytes(char , &base[element_size*(pivotIndex)], &base[element_size*(num_elements-1)], element_size); *min=base; *max=base; 
swapbytes(char , pm, &base[element_size*(num_elements-1)], element_size); for (i=0; i<(num_elements-1); i++) { if (comparer(&base[element_size*i], &base[element_size*(num_elements-1)])!=1) { if (i!=store) { swapbytes(char ,&base[element_size*i], &base[element_size*store], element_size); } //if (comparer(*max, &base[element_size*store])==-1) *max=&base[element_size*store]; if (comparer(*min, &base[element_size*store])==1) *min=&base[element_size*store]; store++; } // if (comparer(*min, &base[element_size*i])==1) *min=&base[element_size*i]; if (comparer(*max, &base[element_size*i])==-1) *max=&base[element_size*i]; } if (store != i) swapbytes(char, &base[element_size*store], &base[element_size*(num_elements-1)], element_size); if (comparer(*max, &base[element_size*(num_elements-1)])==-1) *max=&base[element_size*(num_elements-1)]; // else if (comparer(*min, &base[element_size*(num_elements-1)])==1) *min=&base[element_size*(num_elements-1)]; return store; } static __inline void isort(void* data, size_t num_elements, size_t element_size, int (*comparer)(const void *, const void *)) { register int i, j=0; char *base=data; for (i=1; i< num_elements; i++) { j=i-1; while ((j>=0) && (comparer(&base[j*element_size], &base[(j+1)*element_size])==1)) { swapbytes(char ,&base[element_size*(j+1)],&base[element_size*j],element_size); j--; } } } void myqsort(void* data, size_t num_elements, size_t element_size, int (*comparer)(const void *, const void *)) { if (num_elements<=1) return; if (num_elements<10) {isort(data, num_elements, element_size, comparer); return;} int index= partition(data, num_elements, element_size, comparer); char *base=data; if (index<(num_elements/2)) { #pragma omp parallel sections //firstprivate(base, p, q, r) { #pragma omp section myqsort(base, index, element_size, comparer); #pragma omp section myqsort(&base[element_size*(index+1)], num_elements-(index+1), element_size, comparer); } } else { #pragma omp parallel sections //firstprivate(base, p, q, r) { 
#pragma omp section myqsort(&base[element_size*(index+1)], num_elements-(index+1), element_size, comparer); #pragma omp section myqsort(base, index, element_size, comparer); } } } void print_omp_info(void) { }
ZQ_CNN_MTCNN_Interface.h
#ifndef _ZQ_CNN_MTCNN_INTERFACE_H_ #define _ZQ_CNN_MTCNN_INTERFACE_H_ #pragma once #include "ZQ_CNN_Net_Interface.h" #include "ZQ_CNN_Tensor4D_Interface.h" #include "ZQ_CNN_BBoxUtils.h" #include <omp.h> namespace ZQ { template<class ZQ_CNN_Net_Interface, class ZQ_CNN_Tensor4D_Interface, class ZQ_CNN_Tensor4D_Interface_Base> class ZQ_CNN_MTCNN_Interface { public: using string = std::string; ZQ_CNN_MTCNN_Interface() { min_size = 60; thresh[0] = 0.6; thresh[1] = 0.7; thresh[2] = 0.7; nms_thresh[0] = 0.6; nms_thresh[1] = 0.7; nms_thresh[2] = 0.7; width = 0; height = 0; factor = 0.709; pnet_overlap_thresh_count = 4; pnet_size = 12; pnet_stride = 2; special_handle_very_big_face = false; force_run_pnet_multithread = false; show_debug_info = false; limit_r_num = 0; limit_o_num = 0; limit_l_num = 0; } ~ZQ_CNN_MTCNN_Interface() { } private: #if __ARM_NEON const int BATCH_SIZE = 16; #else const int BATCH_SIZE = 64; #endif std::vector<ZQ_CNN_Net_Interface> pnet, rnet, onet, lnet; bool has_lnet; int thread_num; float thresh[3], nms_thresh[3]; int min_size; int width, height; float factor; int pnet_overlap_thresh_count; int pnet_size; int pnet_stride; int rnet_size; int onet_size; int lnet_size; bool special_handle_very_big_face; bool do_landmark; float early_accept_thresh; float nms_thresh_per_scale; bool force_run_pnet_multithread; std::vector<float> scales; std::vector<ZQ_CNN_Tensor4D_Interface> pnet_images; ZQ_CNN_Tensor4D_Interface ori_input, rnet_image, onet_image; bool show_debug_info; int limit_r_num; int limit_o_num; int limit_l_num; public: void TurnOnShowDebugInfo() { show_debug_info = true; } void TurnOffShowDebugInfo() { show_debug_info = false; } void SetLimit(int limit_r = 0, int limit_o = 0, int limit_l = 0) { limit_r_num = limit_r; limit_o_num = limit_o; limit_l_num = limit_l; } bool Init(const string& pnet_param, const string& pnet_model, const string& rnet_param, const string& rnet_model, const string& onet_param, const string& onet_model, int thread_num = 1, 
bool has_lnet = false, const string& lnet_param = "", const std::string& lnet_model = "") { if (thread_num < 1) force_run_pnet_multithread = true; else force_run_pnet_multithread = false; thread_num = __max(1, thread_num); pnet.resize(thread_num); rnet.resize(thread_num); onet.resize(thread_num); this->has_lnet = has_lnet; if (has_lnet) { lnet.resize(thread_num); } bool ret = true; for (int i = 0; i < thread_num; i++) { ret = pnet[i].LoadFrom(pnet_param, pnet_model, true, 1e-9, true) && rnet[i].LoadFrom(rnet_param, rnet_model, true, 1e-9, true) && onet[i].LoadFrom(onet_param, onet_model, true, 1e-9, true); if (has_lnet && ret) ret = lnet[i].LoadFrom(lnet_param, lnet_model, true, 1e-9, true); if (!ret) break; } if (!ret) { pnet.clear(); rnet.clear(); onet.clear(); if (has_lnet) lnet.clear(); this->thread_num = 0; } else this->thread_num = thread_num; if (show_debug_info) { printf("rnet = %.2f M, onet = %.2f M\n", rnet[0].GetNumOfMulAdd() / (1024.0*1024.0), onet[0].GetNumOfMulAdd() / (1024.0*1024.0)); if (has_lnet) printf("lnet = %.2f M\n", lnet[0].GetNumOfMulAdd() / (1024.0*1024.0)); } int C, H, W; rnet[0].GetInputDim(C, H, W); rnet_size = H; onet[0].GetInputDim(C, H, W); onet_size = H; if (has_lnet) { lnet[0].GetInputDim(C, H, W); lnet_size = H; } return ret; } void SetPara(int w, int h, int min_face_size = 60, float pthresh = 0.6, float rthresh = 0.7, float othresh = 0.7, float nms_pthresh = 0.6, float nms_rthresh = 0.7, float nms_othresh = 0.7, float scale_factor = 0.709, int pnet_overlap_thresh_count = 4, int pnet_size = 12, int pnet_stride = 2, bool special_handle_very_big_face = false, bool do_landmark = true, float early_accept_thresh = 1.00) { min_size = __max(pnet_size, min_face_size); thresh[0] = __max(0.1, pthresh); thresh[1] = __max(0.1, rthresh); thresh[2] = __max(0.1, othresh); nms_thresh[0] = __max(0.1, nms_pthresh); nms_thresh[1] = __max(0.1, nms_rthresh); nms_thresh[2] = __max(0.1, nms_othresh); scale_factor = __max(0.5, __min(0.97, scale_factor)); 
this->pnet_overlap_thresh_count = __max(0, pnet_overlap_thresh_count); this->pnet_size = pnet_size; this->pnet_stride = pnet_stride; this->special_handle_very_big_face = special_handle_very_big_face; this->do_landmark = do_landmark; this->early_accept_thresh = early_accept_thresh; if (pnet_size == 20 && pnet_stride == 4) nms_thresh_per_scale = 0.45; else nms_thresh_per_scale = 0.495; if (width != w || height != h || factor != scale_factor) { scales.clear(); pnet_images.clear(); width = w; height = h; float minside = __min(width, height); int MIN_DET_SIZE = pnet_size; float m = (float)MIN_DET_SIZE / min_size; minside *= m; while (minside > MIN_DET_SIZE) { scales.push_back(m); minside *= factor; m *= factor; } minside = __min(width, height); int count = scales.size(); for (int i = scales.size() - 1; i >= 0; i--) { if (ceil(scales[i] * minside) <= pnet_size) { count--; } } if (special_handle_very_big_face) { if (count > 2) count--; scales.resize(count); if (count > 0) { float last_size = ceil(scales[count - 1] * minside); for (int tmp_size = last_size - 1; tmp_size >= pnet_size + 1; tmp_size -= 2) { scales.push_back((float)tmp_size / minside); count++; } } scales.push_back((float)pnet_size / minside); count++; } else { scales.push_back((float)pnet_size / minside); count++; } pnet_images.resize(count); } } bool Find(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox>& results) { double t1 = omp_get_wtime(); if (width != _width || height != _height) return false; if (!ori_input.ConvertFromBGR(bgr_img, width, height, _widthStep)) return false; double t2 = omp_get_wtime(); if (show_debug_info) printf("convert cost: %.3f ms\n", 1000 * (t2 - t1)); return Find(ori_input, results); } bool Find106(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox106>& results) { double t1 = omp_get_wtime(); if (width != _width || height != _height) return false; if (!ori_input.ConvertFromBGR(bgr_img, 
width, height, _widthStep))
				return false;
			double t2 = omp_get_wtime();
			if (show_debug_info)
				printf("convert cost: %.3f ms\n", 1000 * (t2 - t1));
			return Find106(ori_input, results);
		}

		/* Run the full cascade on an already-converted tensor:
		   P-net -> (limit) -> R-net -> (limit) -> O-net
		   [-> L-net when landmarks are wanted and lnet is loaded]. */
		bool Find(ZQ_CNN_Tensor4D_Interface& input, std::vector<ZQ_CNN_BBox>& results)
		{
			double t1 = omp_get_wtime();
			std::vector<ZQ_CNN_BBox> firstBbox, secondBbox, thirdBbox;
			if (!_Pnet_stage(input, firstBbox))
				return false;
			//results = firstBbox;
			//return true;
			if (limit_r_num > 0)
			{
				_select(firstBbox, limit_r_num, input.GetW(), input.GetH());
			}
			double t2 = omp_get_wtime();
			if (!_Rnet_stage(input, firstBbox, secondBbox))
				return false;
			//results = secondBbox;
			//return true;
			if (limit_o_num > 0)
			{
				_select(secondBbox, limit_o_num, input.GetW(), input.GetH());
			}
			if (!has_lnet || !do_landmark)
			{
				// No landmark refinement: O-net writes the final results directly.
				double t3 = omp_get_wtime();
				if (!_Onet_stage(input, secondBbox, results))
					return false;
				double t4 = omp_get_wtime();
				if (show_debug_info)
				{
					printf("final found num: %d\n", (int)results.size());
					printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms)\n",
						1000 * (t4 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3));
				}
			}
			else
			{
				double t3 = omp_get_wtime();
				if (!_Onet_stage(input, secondBbox, thirdBbox))
					return false;
				if (limit_l_num > 0)
				{
					_select(thirdBbox, limit_l_num, input.GetW(), input.GetH());
				}
				double t4 = omp_get_wtime();
				if (!_Lnet_stage(input, thirdBbox, results))
					return false;
				double t5 = omp_get_wtime();
				if (show_debug_info)
				{
					printf("final found num: %d\n", (int)results.size());
					printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms, L: %.3f ms)\n",
						1000 * (t5 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3), 1000 * (t5 - t4));
				}
			}
			return true;
		}

		/* 106-landmark variant of the cascade; requires lnet and do_landmark. */
		bool Find106(ZQ_CNN_Tensor4D_Interface& input, std::vector<ZQ_CNN_BBox106>& results)
		{
			double t1 = omp_get_wtime();
			std::vector<ZQ_CNN_BBox> firstBbox, secondBbox, thirdBbox;
			if (!_Pnet_stage(input, firstBbox))
				return false;
			//results = firstBbox;
			//return true;
			if (limit_r_num > 0)
			{
				_select(firstBbox,
limit_r_num, input.GetW(), input.GetH());
			}
			double t2 = omp_get_wtime();
			if (!_Rnet_stage(input, firstBbox, secondBbox))
				return false;
			//results = secondBbox;
			//return true;
			if (limit_o_num > 0)
			{
				_select(secondBbox, limit_o_num, input.GetW(), input.GetH());
			}
			// 106-point landmarks only make sense with the L-net available.
			if (!has_lnet || !do_landmark)
			{
				return false;
			}
			double t3 = omp_get_wtime();
			if (!_Onet_stage(input, secondBbox, thirdBbox))
				return false;
			if (limit_l_num > 0)
			{
				_select(thirdBbox, limit_l_num, input.GetW(), input.GetH());
			}
			double t4 = omp_get_wtime();
			if (!_Lnet106_stage(input, thirdBbox, results))
				return false;
			double t5 = omp_get_wtime();
			if (show_debug_info)
			{
				printf("final found num: %d\n", (int)results.size());
				printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms, L: %.3f ms)\n",
					1000 * (t5 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3), 1000 * (t5 - t4));
			}
			return true;
		}

	private:
		/* Run P-net over every pyramid scale on the calling thread and store the
		   face-probability map of each scale into maps / mapH / mapW. */
		void _compute_Pnet_single_thread(ZQ_CNN_Tensor4D_Interface& input,
			std::vector<std::vector<float> >& maps, std::vector<int>& mapH, std::vector<int>& mapW)
		{
			int scale_num = 0;
			for (int i = 0; i < scales.size(); i++)
			{
				int changedH = (int)ceil(height*scales[i]);
				int changedW = (int)ceil(width*scales[i]);
				if (changedH < pnet_size || changedW < pnet_size)
					continue;
				scale_num++;
				mapH.push_back((changedH - pnet_size) / pnet_stride + 1);
				mapW.push_back((changedW - pnet_size) / pnet_stride + 1);
			}
			maps.resize(scale_num);
			for (int i = 0; i < scale_num; i++)
			{
				maps[i].resize(mapH[i] * mapW[i]);
			}
			for (int i = 0; i < scale_num; i++)
			{
				int changedH = (int)ceil(height*scales[i]);
				int changedW = (int)ceil(width*scales[i]);
				float cur_scale_x = (float)width / changedW;
				float cur_scale_y = (float)height / changedH;
				double t10 = omp_get_wtime();
				// scale 1 means the original input is forwarded directly
				if (scales[i] != 1)
				{
					input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0);
				}
				double t11 = omp_get_wtime();
				if (scales[i] != 1)
					pnet[0].Forward(pnet_images[i]);
				else
					pnet[0].Forward(input);
				double t12 = omp_get_wtime();
				if (show_debug_info)
					printf("Pnet [%d]: resolution [%dx%d], resize:%.3f ms, cost:%.3f ms\n",
						i, changedW, changedH, 1000 * (t11 - t10), 1000 * (t12 - t11));
				const ZQ_CNN_Tensor4D_Interface_Base* score = pnet[0].GetBlobByName("prob1");
				//score p
				int scoreH = score->GetH();
				int scoreW = score->GetW();
				int scorePixStep = score->GetPixelStep();
				const float *p = score->GetFirstPixelPtr() + 1;
				for (int row = 0; row < scoreH; row++)
				{
					for (int col = 0; col < scoreW; col++)
					{
						if (row < mapH[i] && col < mapW[i])
							maps[i][row*mapW[i] + col] = *p;
						p += scorePixStep;
					}
				}
			}
		}

		/* Multi-threaded P-net: resize all pyramid levels (possibly in parallel),
		   then tile each level into overlapping blocks and forward the tiles
		   across the per-thread net copies. */
		void _compute_Pnet_multi_thread(ZQ_CNN_Tensor4D_Interface& input,
			std::vector<std::vector<float> >& maps, std::vector<int>& mapH, std::vector<int>& mapW)
		{
			if (thread_num <= 1)
			{
				for (int i = 0; i < scales.size(); i++)
				{
					int changedH = (int)ceil(height*scales[i]);
					int changedW = (int)ceil(width*scales[i]);
					if (changedH < pnet_size || changedW < pnet_size)
						continue;
					if (scales[i] != 1)
					{
						input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0);
					}
				}
			}
			else
			{
#pragma omp parallel for num_threads(thread_num) schedule(dynamic, 1)
				for (int i = 0; i < scales.size(); i++)
				{
					int changedH = (int)ceil(height*scales[i]);
					int changedW = (int)ceil(width*scales[i]);
					if (changedH < pnet_size || changedW < pnet_size)
						continue;
					if (scales[i] != 1)
					{
						input.ResizeBilinear(pnet_images[i], changedW, changedH, 0, 0);
					}
				}
			}
			int scale_num = 0;
			for (int i = 0; i < scales.size(); i++)
			{
				int changedH = (int)ceil(height*scales[i]);
				int changedW = (int)ceil(width*scales[i]);
				if (changedH < pnet_size || changedW < pnet_size)
					continue;
				scale_num++;
				mapH.push_back((changedH - pnet_size) / pnet_stride + 1);
				mapW.push_back((changedW - pnet_size) / pnet_stride + 1);
			}
			maps.resize(scale_num);
			for (int i = 0; i < scale_num; i++)
			{
				maps[i].resize(mapH[i] * mapW[i]);
			}
			// Tile every scale into overlapping blocks; each tile is one task.
			std::vector<int> task_rect_off_x;
			std::vector<int> task_rect_off_y;
			std::vector<int> task_rect_width;
			std::vector<int> task_rect_height;
			std::vector<float> task_scale;
			std::vector<int> task_scale_id;
			int stride =
pnet_stride;
			const int block_size = 64 * stride;
			int cellsize = pnet_size;
			int border_size = cellsize - stride;
			int overlap_border_size = cellsize / stride;
			int jump_size = block_size - border_size;	// tiles overlap by border_size pixels
			for (int i = 0; i < scales.size(); i++)
			{
				int changeH = (int)ceil(height*scales[i]);
				int changeW = (int)ceil(width*scales[i]);
				if (changeH < pnet_size || changeW < pnet_size)
					continue;
				// count how many tile rows / columns cover this pyramid level
				int block_H_num = 0;
				int block_W_num = 0;
				int start = 0;
				while (start < changeH)
				{
					block_H_num++;
					if (start + block_size >= changeH)
						break;
					start += jump_size;
				}
				start = 0;
				while (start < changeW)
				{
					block_W_num++;
					if (start + block_size >= changeW)
						break;
					start += jump_size;
				}
				for (int s = 0; s < block_H_num; s++)
				{
					for (int t = 0; t < block_W_num; t++)
					{
						int rect_off_x = t * jump_size;
						int rect_off_y = s * jump_size;
						int rect_width = __min(changeW, rect_off_x + block_size) - rect_off_x;
						int rect_height = __min(changeH, rect_off_y + block_size) - rect_off_y;
						// keep only tiles large enough to contain one P-net window
						if (rect_width >= cellsize && rect_height >= cellsize)
						{
							task_rect_off_x.push_back(rect_off_x);
							task_rect_off_y.push_back(rect_off_y);
							task_rect_width.push_back(rect_width);
							task_rect_height.push_back(rect_height);
							task_scale.push_back(scales[i]);
							task_scale_id.push_back(i);
						}
					}
				}
			}
			//
			int task_num = task_scale.size();
			std::vector<ZQ_CNN_Tensor4D_Interface> task_pnet_images(thread_num);
			if (thread_num <= 1)
			{
				for (int i = 0; i < task_num; i++)
				{
					int thread_id = omp_get_thread_num();
					int scale_id = task_scale_id[i];
					float cur_scale = task_scale[i];
					int i_rect_off_x = task_rect_off_x[i];
					int i_rect_off_y = task_rect_off_y[i];
					int i_rect_width = task_rect_width[i];
					int i_rect_height = task_rect_height[i];
					// scale 1 means the original input is used directly (no resized copy exists)
					if (scale_id == 0 && scales[0] == 1)
					{
						if (!input.ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y,
							i_rect_width, i_rect_height, 0, 0))
							continue;
					}
					else
					{
						if (!pnet_images[scale_id].ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y,
							i_rect_width, i_rect_height, 0, 0))
							continue;
					}
					if
(!pnet[thread_id].Forward(task_pnet_images[thread_id]))
						continue;
					const ZQ_CNN_Tensor4D_Interface_Base* score = pnet[thread_id].GetBlobByName("prob1");
					int task_count = 0;
					//score p
					int scoreH = score->GetH();
					int scoreW = score->GetW();
					int scorePixStep = score->GetPixelStep();
					const float *p = score->GetFirstPixelPtr() + 1;
					ZQ_CNN_BBox bbox;
					ZQ_CNN_OrderScore order;
					// write this tile's scores back into the full per-scale map
					for (int row = 0; row < scoreH; row++)
					{
						for (int col = 0; col < scoreW; col++)
						{
							int real_row = row + i_rect_off_y / stride;
							int real_col = col + i_rect_off_x / stride;
							if (real_row < mapH[scale_id] && real_col < mapW[scale_id])
								maps[scale_id][real_row*mapW[scale_id] + real_col] = *p;
							p += scorePixStep;
						}
					}
				}
			}
			else
			{
#pragma omp parallel for num_threads(thread_num)
				for (int i = 0; i < task_num; i++)
				{
					int thread_id = omp_get_thread_num();
					int scale_id = task_scale_id[i];
					float cur_scale = task_scale[i];
					int i_rect_off_x = task_rect_off_x[i];
					int i_rect_off_y = task_rect_off_y[i];
					int i_rect_width = task_rect_width[i];
					int i_rect_height = task_rect_height[i];
					if (scale_id == 0 && scales[0] == 1)
					{
						if (!input.ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y,
							i_rect_width, i_rect_height, 0, 0))
							continue;
					}
					else
					{
						if (!pnet_images[scale_id].ROI(task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y,
							i_rect_width, i_rect_height, 0, 0))
							continue;
					}
					if (!pnet[thread_id].Forward(task_pnet_images[thread_id]))
						continue;
					const ZQ_CNN_Tensor4D_Interface_Base* score = pnet[thread_id].GetBlobByName("prob1");
					int task_count = 0;
					//score p
					int scoreH = score->GetH();
					int scoreW = score->GetW();
					int scorePixStep = score->GetPixelStep();
					const float *p = score->GetFirstPixelPtr() + 1;
					ZQ_CNN_BBox bbox;
					ZQ_CNN_OrderScore order;
					// disjoint tiles of the same map may be written concurrently;
					// each (real_row, real_col) cell is owned by exactly one tile
					for (int row = 0; row < scoreH; row++)
					{
						for (int col = 0; col < scoreW; col++)
						{
							int real_row = row + i_rect_off_y / stride;
							int real_col = col + i_rect_off_x / stride;
							if (real_row < mapH[scale_id] && real_col < mapW[scale_id])
								maps[scale_id][real_row*mapW[scale_id] + real_col] =
*p;
							p += scorePixStep;
						}
					}
				}
			}
		}

		/* Stage 1: run P-net over the pyramid, threshold the probability maps,
		   per-scale NMS, then a global "Union" NMS and box squaring. */
		bool _Pnet_stage(ZQ_CNN_Tensor4D_Interface& input, std::vector<ZQ_CNN_BBox>& firstBbox)
		{
			if (thread_num <= 0)
				return false;
			double t1 = omp_get_wtime();
			firstBbox.clear();
			std::vector<std::vector<float> > maps;
			std::vector<int> mapH;
			std::vector<int> mapW;
			if (thread_num == 1 && !force_run_pnet_multithread)
			{
				pnet[0].TurnOffShowDebugInfo();
				//pnet[0].TurnOnShowDebugInfo();
				_compute_Pnet_single_thread(input, maps, mapH, mapW);
			}
			else
			{
				_compute_Pnet_multi_thread(input, maps, mapH, mapW);
			}
			ZQ_CNN_OrderScore order;
			std::vector<std::vector<ZQ_CNN_BBox> > bounding_boxes(scales.size());
			std::vector<std::vector<ZQ_CNN_OrderScore> > bounding_scores(scales.size());
			const int block_size = 32;
			int stride = pnet_stride;
			int cellsize = pnet_size;
			int border_size = cellsize / stride;
			for (int i = 0; i < maps.size(); i++)
			{
				double t13 = omp_get_wtime();
				int changedH = (int)ceil(height*scales[i]);
				int changedW = (int)ceil(width*scales[i]);
				if (changedH < pnet_size || changedW < pnet_size)
					continue;
				float cur_scale_x = (float)width / changedW;
				float cur_scale_y = (float)height / changedH;
				int count = 0;
				//score p
				int scoreH = mapH[i];
				int scoreW = mapW[i];
				const float *p = &maps[i][0];
				// Small maps are scanned directly; large maps are tiled further below.
				if (scoreW <= block_size && scoreH < block_size)
				{
					ZQ_CNN_BBox bbox;
					ZQ_CNN_OrderScore order;
					for (int row = 0; row < scoreH; row++)
					{
						for (int col = 0; col < scoreW; col++)
						{
							if (*p > thresh[0])
							{
								bbox.score = *p;
								order.score = *p;
								order.oriOrder = count;
								// box in the resized image's coordinate frame
								bbox.row1 = stride*row;
								bbox.col1 = stride*col;
								bbox.row2 = stride*row + cellsize;
								bbox.col2 = stride*col + cellsize;
								bbox.exist = true;
								bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
								bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size)
									&& (col >= border_size && col < scoreW - border_size);
								bounding_boxes[i].push_back(bbox);
								bounding_scores[i].push_back(order);
								count++;
							}
							p++;
						}
					}
					int before_count = bounding_boxes[i].size();
					ZQ_CNN_BBoxUtils::_nms(bounding_boxes[i],
bounding_scores[i], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count); int after_count = bounding_boxes[i].size(); for (int j = 0; j < after_count; j++) { ZQ_CNN_BBox& bbox = bounding_boxes[i][j]; bbox.row1 = round(bbox.row1 *cur_scale_y); bbox.col1 = round(bbox.col1 *cur_scale_x); bbox.row2 = round(bbox.row2 *cur_scale_y); bbox.col2 = round(bbox.col2 *cur_scale_x); bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); } double t14 = omp_get_wtime(); if (show_debug_info) printf("nms cost: %.3f ms, (%d-->%d)\n", 1000 * (t14 - t13), before_count, after_count); } else { int before_count = 0, after_count = 0; int block_H_num = __max(1, scoreH / block_size); int block_W_num = __max(1, scoreW / block_size); int block_num = block_H_num*block_W_num; int width_per_block = scoreW / block_W_num; int height_per_block = scoreH / block_H_num; std::vector<std::vector<ZQ_CNN_BBox> > tmp_bounding_boxes(block_num); std::vector<std::vector<ZQ_CNN_OrderScore> > tmp_bounding_scores(block_num); std::vector<int> block_start_w(block_num), block_end_w(block_num); std::vector<int> block_start_h(block_num), block_end_h(block_num); for (int bh = 0; bh < block_H_num; bh++) { for (int bw = 0; bw < block_W_num; bw++) { int bb = bh * block_W_num + bw; block_start_w[bb] = (bw == 0) ? 0 : (bw*width_per_block - border_size); block_end_w[bb] = (bw == block_num - 1) ? scoreW : ((bw + 1)*width_per_block); block_start_h[bb] = (bh == 0) ? 0 : (bh*height_per_block - border_size); block_end_h[bb] = (bh == block_num - 1) ? 
scoreH : ((bh + 1)*height_per_block);
						}
					}
					int chunk_size = 1;// ceil((float)block_num / thread_num);
					if (thread_num <= 1)
					{
						for (int bb = 0; bb < block_num; bb++)
						{
							ZQ_CNN_BBox bbox;
							ZQ_CNN_OrderScore order;
							int count = 0;
							for (int row = block_start_h[bb]; row < block_end_h[bb]; row++)
							{
								p = &maps[i][0] + row*scoreW + block_start_w[bb];
								for (int col = block_start_w[bb]; col < block_end_w[bb]; col++)
								{
									if (*p > thresh[0])
									{
										bbox.score = *p;
										order.score = *p;
										order.oriOrder = count;
										bbox.row1 = stride*row;
										bbox.col1 = stride*col;
										bbox.row2 = stride*row + cellsize;
										bbox.col2 = stride*col + cellsize;
										bbox.exist = true;
										bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size)
											&& (col >= border_size && col < scoreW - border_size);
										bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
										tmp_bounding_boxes[bb].push_back(bbox);
										tmp_bounding_scores[bb].push_back(order);
										count++;
									}
									p++;
								}
							}
							int tmp_before_count = tmp_bounding_boxes[bb].size();
							ZQ_CNN_BBoxUtils::_nms(tmp_bounding_boxes[bb], tmp_bounding_scores[bb],
								nms_thresh_per_scale, "Union", pnet_overlap_thresh_count);
							int tmp_after_count = tmp_bounding_boxes[bb].size();
							before_count += tmp_before_count;
							after_count += tmp_after_count;
						}
					}
					else
					{
						// NOTE(review): before_count/after_count are updated from several
						// threads without atomics -- they are debug-print counters only.
#pragma omp parallel for schedule(dynamic, chunk_size) num_threads(thread_num)
						for (int bb = 0; bb < block_num; bb++)
						{
							ZQ_CNN_BBox bbox;
							ZQ_CNN_OrderScore order;
							int count = 0;
							for (int row = block_start_h[bb]; row < block_end_h[bb]; row++)
							{
								const float* p = &maps[i][0] + row*scoreW + block_start_w[bb];
								for (int col = block_start_w[bb]; col < block_end_w[bb]; col++)
								{
									if (*p > thresh[0])
									{
										bbox.score = *p;
										order.score = *p;
										order.oriOrder = count;
										bbox.row1 = stride*row;
										bbox.col1 = stride*col;
										bbox.row2 = stride*row + cellsize;
										bbox.col2 = stride*col + cellsize;
										bbox.exist = true;
										bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size)
											&& (col >= border_size && col < scoreW - border_size);
										bbox.area = (bbox.row2 -
bbox.row1)*(bbox.col2 - bbox.col1);
										tmp_bounding_boxes[bb].push_back(bbox);
										tmp_bounding_scores[bb].push_back(order);
										count++;
									}
									p++;
								}
							}
							int tmp_before_count = tmp_bounding_boxes[bb].size();
							ZQ_CNN_BBoxUtils::_nms(tmp_bounding_boxes[bb], tmp_bounding_scores[bb],
								nms_thresh_per_scale, "Union", pnet_overlap_thresh_count);
							int tmp_after_count = tmp_bounding_boxes[bb].size();
							before_count += tmp_before_count;
							after_count += tmp_after_count;
						}
					}
					// Gather the per-block survivors of this scale into one list.
					count = 0;
					for (int bb = 0; bb < block_num; bb++)
					{
						std::vector<ZQ_CNN_BBox>::iterator it = tmp_bounding_boxes[bb].begin();
						for (; it != tmp_bounding_boxes[bb].end(); it++)
						{
							if ((*it).exist)
							{
								bounding_boxes[i].push_back(*it);
								order.score = (*it).score;
								order.oriOrder = count;
								bounding_scores[i].push_back(order);
								count++;
							}
						}
					}
					//ZQ_CNN_BBoxUtils::_nms(bounding_boxes[i], bounding_scores[i], nms_thresh_per_scale, "Union", 0);
					after_count = bounding_boxes[i].size();
					// map back to original-image coordinates
					for (int j = 0; j < after_count; j++)
					{
						ZQ_CNN_BBox& bbox = bounding_boxes[i][j];
						bbox.row1 = round(bbox.row1 *cur_scale_y);
						bbox.col1 = round(bbox.col1 *cur_scale_x);
						bbox.row2 = round(bbox.row2 *cur_scale_y);
						bbox.col2 = round(bbox.col2 *cur_scale_x);
						bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
					}
					double t14 = omp_get_wtime();
					if (show_debug_info)
						printf("nms cost: %.3f ms, (%d-->%d)\n", 1000 * (t14 - t13), before_count, after_count);
				}
			}
			// Merge all scales and run the stage-1 global NMS.
			std::vector<ZQ_CNN_OrderScore> firstOrderScore;
			int count = 0;
			for (int i = 0; i < scales.size(); i++)
			{
				std::vector<ZQ_CNN_BBox>::iterator it = bounding_boxes[i].begin();
				for (; it != bounding_boxes[i].end(); it++)
				{
					if ((*it).exist)
					{
						firstBbox.push_back(*it);
						order.score = (*it).score;
						order.oriOrder = count;
						firstOrderScore.push_back(order);
						count++;
					}
				}
			}
			//the first stage's nms
			if (count < 1) return false;
			double t15 = omp_get_wtime();
			ZQ_CNN_BBoxUtils::_nms(firstBbox, firstOrderScore, nms_thresh[0], "Union", 0, 1);
			ZQ_CNN_BBoxUtils::_refine_and_square_bbox(firstBbox, width, height, true);
			double t16 =
omp_get_wtime();
			if (show_debug_info)
				printf("nms cost: %.3f ms\n", 1000 * (t16 - t15));
			if (show_debug_info)
				printf("first stage candidate count: %d\n", count);
			double t3 = omp_get_wtime();
			if (show_debug_info)
				printf("stage 1: cost %.3f ms\n", 1000 * (t3 - t1));
			return true;
		}

		/* Stage 2: crop every stage-1 candidate, batch the crops across the
		   worker threads, run R-net, keep boxes above thresh[1], then "Min"
		   NMS + box squaring. */
		bool _Rnet_stage(const ZQ_CNN_Tensor4D_Interface& input, std::vector<ZQ_CNN_BBox>& firstBbox,
			std::vector<ZQ_CNN_BBox>& secondBbox)
		{
			double t3 = omp_get_wtime();
			secondBbox.clear();
			std::vector<ZQ_CNN_BBox>::iterator it = firstBbox.begin();
			std::vector<ZQ_CNN_OrderScore> secondScore;
			std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h;
			int r_count = 0;
			for (; it != firstBbox.end(); it++)
			{
				if ((*it).exist)
				{
					int off_x = it->col1;
					int off_y = it->row1;
					int rect_w = it->col2 - off_x;
					int rect_h = it->row2 - off_y;
					// discard boxes much smaller than the requested minimal face size
					if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height
						||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size)
					{
						(*it).exist = false;
						continue;
					}
					else
					{
						src_off_x.push_back(off_x);
						src_off_y.push_back(off_y);
						src_rect_w.push_back(rect_w);
						src_rect_h.push_back(rect_h);
						r_count++;
						secondBbox.push_back(*it);
					}
				}
			}
			// Split the crops into per-task batches of at most BATCH_SIZE each.
			int batch_size = BATCH_SIZE;
			int per_num = ceil((float)r_count / thread_num);
			int need_thread_num = thread_num;
			if (per_num > batch_size)
			{
				need_thread_num = ceil((float)r_count / batch_size);
				per_num = batch_size;
			}
			std::vector<ZQ_CNN_Tensor4D_Interface> task_rnet_images(need_thread_num);
			std::vector<std::vector<int> > task_src_off_x(need_thread_num);
			std::vector<std::vector<int> > task_src_off_y(need_thread_num);
			std::vector<std::vector<int> > task_src_rect_w(need_thread_num);
			std::vector<std::vector<int> > task_src_rect_h(need_thread_num);
			std::vector<std::vector<ZQ_CNN_BBox> > task_secondBbox(need_thread_num);
			for (int i = 0; i < need_thread_num; i++)
			{
				int st_id = per_num*i;
				int end_id = __min(r_count, per_num*(i + 1));
				int cur_num = end_id - st_id;
				if (cur_num > 0)
				{
					task_src_off_x[i].resize(cur_num);
task_src_off_y[i].resize(cur_num); task_src_rect_w[i].resize(cur_num); task_src_rect_h[i].resize(cur_num); task_secondBbox[i].resize(cur_num); for (int j = 0; j < cur_num; j++) { task_src_off_x[i][j] = src_off_x[st_id + j]; task_src_off_y[i][j] = src_off_y[st_id + j]; task_src_rect_w[i][j] = src_rect_w[st_id + j]; task_src_rect_h[i][j] = src_rect_h[st_id + j]; task_secondBbox[i][j] = secondBbox[st_id + j]; } } } if (thread_num <= 1) { for (int pp = 0; pp < need_thread_num; pp++) { if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_rnet_images[pp], rnet_size, rnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } rnet[0].Forward(task_rnet_images[pp]); const ZQ_CNN_Tensor4D_Interface_Base* score = rnet[0].GetBlobByName("prob1"); const ZQ_CNN_Tensor4D_Interface_Base* location = rnet[0].GetBlobByName("conv5-2"); const float* score_ptr = score->GetFirstPixelPtr(); const float* location_ptr = location->GetFirstPixelPtr(); int score_sliceStep = score->GetSliceStep(); int location_sliceStep = location->GetSliceStep(); int task_count = 0; for (int i = 0; i < task_secondBbox[pp].size(); i++) { if (score_ptr[i*score_sliceStep + 1] > thresh[1]) { for (int j = 0; j < 4; j++) task_secondBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j]; task_secondBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i]; task_secondBbox[pp][i].score = score_ptr[i*score_sliceStep + 1]; task_count++; } else { task_secondBbox[pp][i].exist = false; } } if (task_count < 1) { task_secondBbox[pp].clear(); continue; } for (int i = task_secondBbox[pp].size() - 1; i >= 0; i--) { if (!task_secondBbox[pp][i].exist) task_secondBbox[pp].erase(task_secondBbox[pp].begin() + i); } } } else { #pragma omp parallel for num_threads(thread_num) schedule(dynamic,1) for (int pp = 0; pp < need_thread_num; pp++) { int thread_id = omp_get_thread_num(); if (task_src_off_x.size() == 0) continue; if 
(!input.ResizeBilinearRect(task_rnet_images[pp], rnet_size, rnet_size, 0, 0,
						task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
					{
						continue;
					}
					rnet[thread_id].Forward(task_rnet_images[pp]);
					const ZQ_CNN_Tensor4D_Interface_Base* score = rnet[thread_id].GetBlobByName("prob1");
					const ZQ_CNN_Tensor4D_Interface_Base* location = rnet[thread_id].GetBlobByName("conv5-2");
					const float* score_ptr = score->GetFirstPixelPtr();
					const float* location_ptr = location->GetFirstPixelPtr();
					int score_sliceStep = score->GetSliceStep();
					int location_sliceStep = location->GetSliceStep();
					int task_count = 0;
					for (int i = 0; i < task_secondBbox[pp].size(); i++)
					{
						if (score_ptr[i*score_sliceStep + 1] > thresh[1])
						{
							for (int j = 0; j < 4; j++)
								task_secondBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j];
							task_secondBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i];
							task_secondBbox[pp][i].score = score_ptr[i*score_sliceStep + 1];
							task_count++;
						}
						else
						{
							task_secondBbox[pp][i].exist = false;
						}
					}
					if (task_count < 1)
					{
						task_secondBbox[pp].clear();
						continue;
					}
					// compact: drop the rejected boxes of this batch
					for (int i = task_secondBbox[pp].size() - 1; i >= 0; i--)
					{
						if (!task_secondBbox[pp][i].exist)
							task_secondBbox[pp].erase(task_secondBbox[pp].begin() + i);
					}
				}
			}
			// Concatenate the per-task survivors, then stage-2 NMS + box squaring.
			int count = 0;
			for (int i = 0; i < need_thread_num; i++)
			{
				count += task_secondBbox[i].size();
			}
			secondBbox.resize(count);
			secondScore.resize(count);
			int id = 0;
			for (int i = 0; i < need_thread_num; i++)
			{
				for (int j = 0; j < task_secondBbox[i].size(); j++)
				{
					secondBbox[id] = task_secondBbox[i][j];
					secondScore[id].score = secondBbox[id].score;
					secondScore[id].oriOrder = id;
					id++;
				}
			}
			//ZQ_CNN_BBoxUtils::_nms(secondBbox, secondScore, nms_thresh[1], "Union");
			ZQ_CNN_BBoxUtils::_nms(secondBbox, secondScore, nms_thresh[1], "Min");
			ZQ_CNN_BBoxUtils::_refine_and_square_bbox(secondBbox, width, height, true);
			count = secondBbox.size();
			double t4 = omp_get_wtime();
			if (show_debug_info)
				printf("run Rnet [%d] times, candidate after nms: %d \n", r_count, count);
			if (show_debug_info)
				printf("stage 2: cost %.3f ms\n", 1000 * (t4 - t3));
			return true;
		}

		/* Stage 3: run O-net on the stage-2 survivors (optionally skipping
		   high-confidence boxes when landmarks are not wanted), collect 5-point
		   landmarks, then a final "Min" NMS. */
		bool _Onet_stage(const ZQ_CNN_Tensor4D_Interface& input, std::vector<ZQ_CNN_BBox>& secondBbox,
			std::vector<ZQ_CNN_BBox>& thirdBbox)
		{
			double t4 = omp_get_wtime();
			thirdBbox.clear();
			std::vector<ZQ_CNN_BBox>::iterator it = secondBbox.begin();
			std::vector<ZQ_CNN_OrderScore> thirdScore;
			std::vector<ZQ_CNN_BBox> early_accept_thirdBbox;
			std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h;
			int o_count = 0;
			for (; it != secondBbox.end(); it++)
			{
				if ((*it).exist)
				{
					int off_x = it->col1;
					int off_y = it->row1;
					int rect_w = it->col2 - off_x;
					int rect_h = it->row2 - off_y;
					if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height
						||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size)
					{
						(*it).exist = false;
						continue;
					}
					else
					{
						// very confident boxes may bypass O-net when landmarks are not needed
						if (!do_landmark && it->score > early_accept_thresh)
						{
							early_accept_thirdBbox.push_back(*it);
						}
						else
						{
							src_off_x.push_back(off_x);
							src_off_y.push_back(off_y);
							src_rect_w.push_back(rect_w);
							src_rect_h.push_back(rect_h);
							o_count++;
							thirdBbox.push_back(*it);
						}
					}
				}
			}
			// Split the crops into per-task batches of at most BATCH_SIZE each.
			int batch_size = BATCH_SIZE;
			int per_num = ceil((float)o_count / thread_num);
			int need_thread_num = thread_num;
			if (per_num > batch_size)
			{
				need_thread_num = ceil((float)o_count / batch_size);
				per_num = batch_size;
			}
			std::vector<ZQ_CNN_Tensor4D_Interface> task_onet_images(need_thread_num);
			std::vector<std::vector<int> > task_src_off_x(need_thread_num);
			std::vector<std::vector<int> > task_src_off_y(need_thread_num);
			std::vector<std::vector<int> > task_src_rect_w(need_thread_num);
			std::vector<std::vector<int> > task_src_rect_h(need_thread_num);
			std::vector<std::vector<ZQ_CNN_BBox> > task_thirdBbox(need_thread_num);
			for (int i = 0; i < need_thread_num; i++)
			{
				int st_id = per_num*i;
				int end_id = __min(o_count, per_num*(i + 1));
				int cur_num = end_id - st_id;
				if (cur_num > 0)
				{
					task_src_off_x[i].resize(cur_num);
					task_src_off_y[i].resize(cur_num);
task_src_rect_w[i].resize(cur_num);
					task_src_rect_h[i].resize(cur_num);
					task_thirdBbox[i].resize(cur_num);
					for (int j = 0; j < cur_num; j++)
					{
						task_src_off_x[i][j] = src_off_x[st_id + j];
						task_src_off_y[i][j] = src_off_y[st_id + j];
						task_src_rect_w[i][j] = src_rect_w[st_id + j];
						task_src_rect_h[i][j] = src_rect_h[st_id + j];
						task_thirdBbox[i][j] = thirdBbox[st_id + j];
					}
				}
			}
			if (thread_num <= 1)
			{
				for (int pp = 0; pp < need_thread_num; pp++)
				{
					if (task_src_off_x.size() == 0 || task_src_off_x[pp].size() == 0)
						continue;
					if (!input.ResizeBilinearRect(task_onet_images[pp], onet_size, onet_size, 0, 0,
						task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
					{
						continue;
					}
					double t31 = omp_get_wtime();
					onet[0].Forward(task_onet_images[pp]);
					double t32 = omp_get_wtime();
					const ZQ_CNN_Tensor4D_Interface_Base* score = onet[0].GetBlobByName("prob1");
					const ZQ_CNN_Tensor4D_Interface_Base* location = onet[0].GetBlobByName("conv6-2");
					const ZQ_CNN_Tensor4D_Interface_Base* keyPoint = onet[0].GetBlobByName("conv6-3");
					const float* score_ptr = score->GetFirstPixelPtr();
					const float* location_ptr = location->GetFirstPixelPtr();
					const float* keyPoint_ptr = 0;
					// landmark blob is optional: some O-net models omit conv6-3
					if (keyPoint != 0)
						keyPoint_ptr = keyPoint->GetFirstPixelPtr();
					int score_sliceStep = score->GetSliceStep();
					int location_sliceStep = location->GetSliceStep();
					int keyPoint_sliceStep = 0;
					if (keyPoint != 0)
						keyPoint_sliceStep = keyPoint->GetSliceStep();
					int task_count = 0;
					ZQ_CNN_OrderScore order;
					for (int i = 0; i < task_thirdBbox[pp].size(); i++)
					{
						if (score_ptr[i*score_sliceStep + 1] > thresh[2])
						{
							for (int j = 0; j < 4; j++)
								task_thirdBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j];
							if (keyPoint != 0)
							{
								// landmark outputs are relative to the candidate box
								for (int num = 0; num < 5; num++)
								{
									task_thirdBbox[pp][i].ppoint[num] = task_thirdBbox[pp][i].col1 +
										(task_thirdBbox[pp][i].col2 - task_thirdBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num];
									task_thirdBbox[pp][i].ppoint[num + 5] = task_thirdBbox[pp][i].row1 +
(task_thirdBbox[pp][i].row2 - task_thirdBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5];
								}
							}
							task_thirdBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i];
							task_thirdBbox[pp][i].score = score_ptr[i*score_sliceStep + 1];
							task_count++;
						}
						else
						{
							task_thirdBbox[pp][i].exist = false;
						}
					}
					if (task_count < 1)
					{
						task_thirdBbox[pp].clear();
						continue;
					}
					// compact: drop the rejected boxes of this batch
					for (int i = task_thirdBbox[pp].size() - 1; i >= 0; i--)
					{
						if (!task_thirdBbox[pp][i].exist)
							task_thirdBbox[pp].erase(task_thirdBbox[pp].begin() + i);
					}
				}
			}
			else
			{
#pragma omp parallel for num_threads(thread_num) schedule(dynamic,1)
				for (int pp = 0; pp < need_thread_num; pp++)
				{
					int thread_id = omp_get_thread_num();
					if (task_src_off_x.size() == 0 || task_src_off_x[pp].size() == 0)
						continue;
					if (!input.ResizeBilinearRect(task_onet_images[pp], onet_size, onet_size, 0, 0,
						task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp]))
					{
						continue;
					}
					double t31 = omp_get_wtime();
					onet[thread_id].Forward(task_onet_images[pp]);
					double t32 = omp_get_wtime();
					const ZQ_CNN_Tensor4D_Interface_Base* score = onet[thread_id].GetBlobByName("prob1");
					const ZQ_CNN_Tensor4D_Interface_Base* location = onet[thread_id].GetBlobByName("conv6-2");
					const ZQ_CNN_Tensor4D_Interface_Base* keyPoint = onet[thread_id].GetBlobByName("conv6-3");
					const float* score_ptr = score->GetFirstPixelPtr();
					const float* location_ptr = location->GetFirstPixelPtr();
					const float* keyPoint_ptr = 0;
					if (keyPoint != 0)
						keyPoint_ptr = keyPoint->GetFirstPixelPtr();
					int score_sliceStep = score->GetSliceStep();
					int location_sliceStep = location->GetSliceStep();
					int keyPoint_sliceStep = 0;
					if (keyPoint != 0)
						keyPoint_sliceStep = keyPoint->GetSliceStep();
					int task_count = 0;
					ZQ_CNN_OrderScore order;
					for (int i = 0; i < task_thirdBbox[pp].size(); i++)
					{
						if (score_ptr[i*score_sliceStep + 1] > thresh[2])
						{
							for (int j = 0; j < 4; j++)
								task_thirdBbox[pp][i].regreCoord[j] = location_ptr[i*location_sliceStep + j];
							if (keyPoint != 0)
							{
								for
(int num = 0; num < 5; num++) { task_thirdBbox[pp][i].ppoint[num] = task_thirdBbox[pp][i].col1 + (task_thirdBbox[pp][i].col2 - task_thirdBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num]; task_thirdBbox[pp][i].ppoint[num + 5] = task_thirdBbox[pp][i].row1 + (task_thirdBbox[pp][i].row2 - task_thirdBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5]; } } task_thirdBbox[pp][i].area = task_src_rect_w[pp][i] * task_src_rect_h[pp][i]; task_thirdBbox[pp][i].score = score_ptr[i*score_sliceStep + 1]; task_count++; } else { task_thirdBbox[pp][i].exist = false; } } if (task_count < 1) { task_thirdBbox[pp].clear(); continue; } for (int i = task_thirdBbox[pp].size() - 1; i >= 0; i--) { if (!task_thirdBbox[pp][i].exist) task_thirdBbox[pp].erase(task_thirdBbox[pp].begin() + i); } } } int count = 0; for (int i = 0; i < need_thread_num; i++) { count += task_thirdBbox[i].size(); } thirdBbox.resize(count); thirdScore.resize(count); int id = 0; for (int i = 0; i < need_thread_num; i++) { for (int j = 0; j < task_thirdBbox[i].size(); j++) { thirdBbox[id] = task_thirdBbox[i][j]; thirdScore[id].score = task_thirdBbox[i][j].score; thirdScore[id].oriOrder = id; id++; } } ZQ_CNN_BBoxUtils::_refine_and_square_bbox(thirdBbox, width, height, false); ZQ_CNN_OrderScore order; for (int i = 0; i < early_accept_thirdBbox.size(); i++) { order.score = early_accept_thirdBbox[i].score; order.oriOrder = count++; thirdScore.push_back(order); thirdBbox.push_back(early_accept_thirdBbox[i]); } ZQ_CNN_BBoxUtils::_nms(thirdBbox, thirdScore, nms_thresh[2], "Min"); double t5 = omp_get_wtime(); if (show_debug_info) printf("run Onet [%d] times, candidate before nms: %d \n", o_count, count); if (show_debug_info) printf("stage 3: cost %.3f ms\n", 1000 * (t5 - t4)); return true; } bool _Lnet_stage(const ZQ_CNN_Tensor4D_Interface& input, std::vector<ZQ_CNN_BBox>& thirdBbox, std::vector<ZQ_CNN_BBox>& fourthBbox) { double t4 = omp_get_wtime(); fourthBbox.clear(); std::vector<ZQ_CNN_BBox>::iterator it 
= thirdBbox.begin(); std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h; int l_count = 0; for (; it != thirdBbox.end(); it++) { if ((*it).exist) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size) { (*it).exist = false; continue; } else { l_count++; fourthBbox.push_back(*it); } } } std::vector<ZQ_CNN_BBox> copy_fourthBbox = fourthBbox; ZQ_CNN_BBoxUtils::_square_bbox(copy_fourthBbox, width, height); for (it = copy_fourthBbox.begin(); it != copy_fourthBbox.end(); ++it) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; src_off_x.push_back(off_x); src_off_y.push_back(off_y); src_rect_w.push_back(rect_w); src_rect_h.push_back(rect_h); } int batch_size = BATCH_SIZE; int per_num = ceil((float)l_count / thread_num); int need_thread_num = thread_num; if (per_num > batch_size) { need_thread_num = ceil((float)l_count / batch_size); per_num = batch_size; } std::vector<ZQ_CNN_Tensor4D_Interface> task_lnet_images(need_thread_num); std::vector<std::vector<int> > task_src_off_x(need_thread_num); std::vector<std::vector<int> > task_src_off_y(need_thread_num); std::vector<std::vector<int> > task_src_rect_w(need_thread_num); std::vector<std::vector<int> > task_src_rect_h(need_thread_num); std::vector<std::vector<ZQ_CNN_BBox> > task_fourthBbox(need_thread_num); for (int i = 0; i < need_thread_num; i++) { int st_id = per_num*i; int end_id = __min(l_count, per_num*(i + 1)); int cur_num = end_id - st_id; if (cur_num > 0) { task_src_off_x[i].resize(cur_num); task_src_off_y[i].resize(cur_num); task_src_rect_w[i].resize(cur_num); task_src_rect_h[i].resize(cur_num); task_fourthBbox[i].resize(cur_num); for (int j = 0; j < cur_num; j++) { task_src_off_x[i][j] = src_off_x[st_id + j]; task_src_off_y[i][j] = src_off_y[st_id + j]; 
task_src_rect_w[i][j] = src_rect_w[st_id + j]; task_src_rect_h[i][j] = src_rect_h[st_id + j]; task_fourthBbox[i][j] = copy_fourthBbox[st_id + j]; } } } if (thread_num <= 1) { for (int pp = 0; pp < need_thread_num; pp++) { if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); lnet[0].Forward(task_lnet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D_Interface_Base* keyPoint = lnet[0].GetBlobByName("conv6-3"); const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int keyPoint_sliceStep = keyPoint->GetSliceStep(); for (int i = 0; i < task_fourthBbox[pp].size(); i++) { for (int num = 0; num < 5; num++) { task_fourthBbox[pp][i].ppoint[num] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num]; task_fourthBbox[pp][i].ppoint[num + 5] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5]; } } } } else { #pragma omp parallel for num_threads(thread_num) schedule(dynamic,1) for (int pp = 0; pp < need_thread_num; pp++) { int thread_id = omp_get_thread_num(); if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); lnet[thread_id].Forward(task_lnet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D_Interface_Base* keyPoint = lnet[thread_id].GetBlobByName("conv6-3"); const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int keyPoint_sliceStep = keyPoint->GetSliceStep(); for (int i = 0; i < task_fourthBbox[pp].size(); i++) { for (int num = 0; num < 5; num++) { task_fourthBbox[pp][i].ppoint[num] = 
task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num]; task_fourthBbox[pp][i].ppoint[num + 5] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num + 5]; } } } } int count = 0; for (int i = 0; i < need_thread_num; i++) { count += task_fourthBbox[i].size(); } fourthBbox.resize(count); int id = 0; for (int i = 0; i < need_thread_num; i++) { for (int j = 0; j < task_fourthBbox[i].size(); j++) { memcpy(fourthBbox[id].ppoint, task_fourthBbox[i][j].ppoint, sizeof(float) * 10); id++; } } double t5 = omp_get_wtime(); if (show_debug_info) printf("run Lnet [%d] times \n", l_count); if (show_debug_info) printf("stage 4: cost %.3f ms\n", 1000 * (t5 - t4)); return true; } bool _Lnet106_stage(const ZQ_CNN_Tensor4D_Interface& input, std::vector<ZQ_CNN_BBox>& thirdBbox, std::vector<ZQ_CNN_BBox106>& resultBbox) { double t4 = omp_get_wtime(); std::vector<ZQ_CNN_BBox> fourthBbox; std::vector<ZQ_CNN_BBox>::iterator it = thirdBbox.begin(); std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h; int l_count = 0; for (; it != thirdBbox.end(); it++) { if ((*it).exist) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size) { (*it).exist = false; continue; } else { l_count++; fourthBbox.push_back(*it); } } } std::vector<ZQ_CNN_BBox> copy_fourthBbox = fourthBbox; ZQ_CNN_BBoxUtils::_square_bbox(copy_fourthBbox, width, height); for (it = copy_fourthBbox.begin(); it != copy_fourthBbox.end(); ++it) { int off_x = it->col1; int off_y = it->row1; int rect_w = it->col2 - off_x; int rect_h = it->row2 - off_y; src_off_x.push_back(off_x); src_off_y.push_back(off_y); src_rect_w.push_back(rect_w); src_rect_h.push_back(rect_h); } int batch_size = BATCH_SIZE; 
int per_num = ceil((float)l_count / thread_num); int need_thread_num = thread_num; if (per_num > batch_size) { need_thread_num = ceil((float)l_count / batch_size); per_num = batch_size; } std::vector<ZQ_CNN_Tensor4D_NHW_C_Align128bit> task_lnet_images(need_thread_num); std::vector<std::vector<int> > task_src_off_x(need_thread_num); std::vector<std::vector<int> > task_src_off_y(need_thread_num); std::vector<std::vector<int> > task_src_rect_w(need_thread_num); std::vector<std::vector<int> > task_src_rect_h(need_thread_num); std::vector<std::vector<ZQ_CNN_BBox106> > task_fourthBbox(need_thread_num); for (int i = 0; i < need_thread_num; i++) { int st_id = per_num*i; int end_id = __min(l_count, per_num*(i + 1)); int cur_num = end_id - st_id; if (cur_num > 0) { task_src_off_x[i].resize(cur_num); task_src_off_y[i].resize(cur_num); task_src_rect_w[i].resize(cur_num); task_src_rect_h[i].resize(cur_num); task_fourthBbox[i].resize(cur_num); for (int j = 0; j < cur_num; j++) { task_src_off_x[i][j] = src_off_x[st_id + j]; task_src_off_y[i][j] = src_off_y[st_id + j]; task_src_rect_w[i][j] = src_rect_w[st_id + j]; task_src_rect_h[i][j] = src_rect_h[st_id + j]; task_fourthBbox[i][j].col1 = copy_fourthBbox[st_id + j].col1; task_fourthBbox[i][j].col2 = copy_fourthBbox[st_id + j].col2; task_fourthBbox[i][j].row1 = copy_fourthBbox[st_id + j].row1; task_fourthBbox[i][j].row2 = copy_fourthBbox[st_id + j].row2; task_fourthBbox[i][j].area = copy_fourthBbox[st_id + j].area; task_fourthBbox[i][j].score = copy_fourthBbox[st_id + j].score; task_fourthBbox[i][j].exist = copy_fourthBbox[st_id + j].exist; } } } resultBbox.resize(l_count); for (int i = 0; i < l_count; i++) { resultBbox[i].col1 = fourthBbox[i].col1; resultBbox[i].col2 = fourthBbox[i].col2; resultBbox[i].row1 = fourthBbox[i].row1; resultBbox[i].row2 = fourthBbox[i].row2; resultBbox[i].score = fourthBbox[i].score; resultBbox[i].exist = fourthBbox[i].exist; resultBbox[i].area = fourthBbox[i].area; } if (thread_num <= 1) { for (int pp 
= 0; pp < need_thread_num; pp++) { if (task_src_off_x[pp].size() == 0) continue; if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); lnet[0].Forward(task_lnet_images[pp]); double t32 = omp_get_wtime(); const ZQ_CNN_Tensor4D_Interface_Base* keyPoint = lnet[0].GetBlobByName("conv6-3"); const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int keypoint_num = keyPoint->GetC() / 2; int keyPoint_sliceStep = keyPoint->GetSliceStep(); for (int i = 0; i < task_fourthBbox[pp].size(); i++) { for (int num = 0; num < keypoint_num; num++) { if ((num >= 33 && num < 43) || (num >= 64 && num < 72) || (num >= 84 && num < 104)) { task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2]/**0.25*/; task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1]/**0.25*/; } else { task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2]; task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1]; } } } } } else { #pragma omp parallel for num_threads(thread_num) for (int pp = 0; pp < need_thread_num; pp++) { int thread_id = omp_get_thread_num(); if (task_src_off_x.size() == 0) continue; if (!input.ResizeBilinearRect(task_lnet_images[pp], lnet_size, lnet_size, 0, 0, task_src_off_x[pp], task_src_off_y[pp], task_src_rect_w[pp], task_src_rect_h[pp])) { continue; } double t31 = omp_get_wtime(); lnet[thread_id].Forward(task_lnet_images[pp]); double t32 = 
omp_get_wtime(); const ZQ_CNN_Tensor4D_Interface_Base* keyPoint = lnet[thread_id].GetBlobByName("conv6-3"); const float* keyPoint_ptr = keyPoint->GetFirstPixelPtr(); int keypoint_num = keyPoint->GetC() / 2; int keyPoint_sliceStep = keyPoint->GetSliceStep(); for (int i = 0; i < task_fourthBbox[pp].size(); i++) { for (int num = 0; num < keypoint_num; num++) { if ((num >= 33 && num < 43) || (num >= 64 && num < 72) || (num >= 84 && num < 104)) { task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2] * 0.5; task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1] * 0.5; } else { task_fourthBbox[pp][i].ppoint[num * 2] = task_fourthBbox[pp][i].col1 + (task_fourthBbox[pp][i].col2 - task_fourthBbox[pp][i].col1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2]; task_fourthBbox[pp][i].ppoint[num * 2 + 1] = task_fourthBbox[pp][i].row1 + (task_fourthBbox[pp][i].row2 - task_fourthBbox[pp][i].row1)*keyPoint_ptr[i*keyPoint_sliceStep + num * 2 + 1]; } } } } } int count = 0; for (int i = 0; i < need_thread_num; i++) { count += task_fourthBbox[i].size(); } resultBbox.resize(count); int id = 0; for (int i = 0; i < need_thread_num; i++) { for (int j = 0; j < task_fourthBbox[i].size(); j++) { memcpy(resultBbox[id].ppoint, task_fourthBbox[i][j].ppoint, sizeof(float) * 212); id++; } } double t5 = omp_get_wtime(); if (show_debug_info) printf("run Lnet [%d] times \n", l_count); if (show_debug_info) printf("stage 4: cost %.3f ms\n", 1000 * (t5 - t4)); return true; } void _select(std::vector<ZQ_CNN_BBox>& bbox, int limit_num, int width, int height) { int in_num = bbox.size(); if (limit_num >= in_num) return; bbox.resize(limit_num); } }; } #endif
gen05.c
/* Description:
        This program implements my Genetic Algorithm method of solving the "N-Queens Problem"

    Author: Georgios Evangelou (1046900)
    Year: 5
    Parallel Programming in Machine Learning Problems
    Electrical and Computer Engineering Department, University of Patras

    System Specifications:
        CPU: AMD Ryzen 2600 (6 cores/12 threads, @3.8 GHz, 6786.23 bogomips)
        GPU: Nvidia GTX 1050 (dual-fan, overclocked)
        RAM: 8GB (dual-channel, @2666 MHz)

    Version Notes:
        Compiles/Runs/Debugs with:
            gcc gen05.c -o gen05 -lm -fopt-info -fopenmp -pg && time ./gen05 && gprof ./gen05
        Inherits all features of previous version if not stated otherwise
        Almost identical to gen04, but achieves double execution speed after creating and
        using a custom thread-safe random-generator function
        Without any optimizations and 12 threads reported:
            For N=200 queens a solution is found after: ~0m13,301s and 89 generations,
            using 600 genes per thread (1054 summed generations)
        CHECK gen04 FOR MORE DETAILS... (only difference is the custom thread-safe
        random-generator function and, thus, the faster execution speed)

    Review fixes in this revision:
        - Solve(): only the thread that actually found a zero-collision gene publishes its
          result, and only if no other thread has published first. Previously every thread
          wrote to the shared result variables on exit, so a "loser" thread could overwrite
          the winner's solution with a non-solution.
        - Solve(): the shared termination flag is now read with an atomic read instead of a
          plain racy load.
        - BreedGeneration(): the `= {-1}` initializer only set element [0][0]; replaced with
          an explicit zero initializer (every slot is overwritten before use anyway, since
          GENES is even).
*/
// ****************************************************************************************************************
//#pragma GCC optimize("O3","unroll-loops","omit-frame-pointer","inline") //Apply O3 and extra optimizations
//#pragma GCC option("arch=native","tune=native","no-zero-upper") //Adapt to the current system
//#pragma GCC target("avx") //Enable AVX
// ****************************************************************************************************************

#include "stdio.h"
#include "stdlib.h"
#include "math.h"
#include "stdbool.h"
#include "time.h"
#include "omp.h"

// ****************************************************************************************************************
#define N 16             //Number of queens
#define GENES 20         //Number of genes (must be even)
#define TARGET_THREADS 12 //Number of threads to ask


/**
 * Produces a pseudo-random integer in the range [mini, maxi].
 * Thread-safe: the LCG state lives in the caller-supplied *seed, so each thread
 * owns its own generator stream (this is the speed-up over rand()).
 */
int RandomInteger2(int mini, int maxi, unsigned *seed) {
    // Numerical-Recipes-style LCG step, folded into [0, RAND_MAX)
    *seed = (unsigned) (1664525 * (*seed) + 1013904223) % RAND_MAX;
    int gap = maxi - mini;
    int randomInGap = (int) (gap * ((float)(*seed)) / ((float)RAND_MAX)); //[0,gap]
    return mini + randomInGap; //[mini,mini+gap]==[mini,maxi]
}


/**
 * Initializes the positional array given.
 * NOTE: uses a fixed internal seed, so every thread starts from the identical
 * gene pool; divergence comes only from each thread's breeding seed.
 */
void GeneInitialization(int genes[GENES][N]) {
    unsigned seed = 1046900;
    for (int i = 0; i < GENES; i++) {
        for (int j = 0; j < N; j++) {
            genes[i][j] = RandomInteger2(0, N - 1, &seed);
        }
    }
}


/**
 * Prints a map of the queens until the M-th positioned queen.
 * Column x holds queen x at row posY[x]; only the first M queens are drawn.
 */
void Map3(int posY[N], int M) {
    for (int i = 0; i < N; i++) printf("===");
    printf("===\n---");
    for (int i = 0; i < N / 3; i++) printf("---");
    printf(" FITTEST GENE ");
    for (int i = 0; i < N / 3; i++) printf("---");
    printf("---\n===");
    for (int i = 0; i < N; i++) printf("===");
    printf("\n");
    for (int i = 0; i < N; i++) printf("---");
    printf("---\n##|");
    for (int i = 0; i < N; i++) printf("%2d ", i + 1);
    printf("\n---");
    for (int i = 0; i < N; i++) printf("---");
    printf("\n");
    for (int y = 0; y < N; y++) {
        printf("%2d| ", y + 1);
        for (int x = 0; x < N; x++) {
            bool flag = false;
            for (int i = 0; i < M; i++) {
                if (i == x && posY[i] == y) {
                    flag = true;
                }
            }
            if (flag) printf("Q");
            else printf("~");
            printf(" ");
        }
        printf("\n");
    }
    for (int i = 0; i < N; i++) printf("---");
    printf("---\n");
}


/**
 * Checks if placing queen x at row y is safe w.r.t. all previously placed queens.
 * (Kept for parity with earlier versions; the GA path uses UtilityFunction instead.)
 */
bool isSafeFromPrevious(int posY[N], int x, int y) {
    int currentQueen = x;
    for (int oldQueen = 0; oldQueen < currentQueen; oldQueen++) {
        // oldQueen < x always holds here, so the row check is what matters
        if (oldQueen == x || posY[oldQueen] == y)
            return false; //If row/column is endangered
        else if (y == posY[oldQueen] + (currentQueen - oldQueen) ||
                 y == posY[oldQueen] - (currentQueen - oldQueen))
            return false; //If diagonal is endangered
    }
    return true;
}


/**
 * Finds the number of collisions between the queens.
 * A perfect (solution) gene scores 0.
 */
int UtilityFunction(int posY[N]) {
    int collisions = 0;
    for (int crnt = 1; crnt < N; crnt++) {
        for (int old = 0; old < crnt; old++) {
            if (old == crnt || posY[old] == posY[crnt])
                collisions++; //If row/column is endangered
            else if (posY[crnt] == posY[old] + (crnt - old) ||
                     posY[crnt] == posY[old] - (crnt - old))
                collisions++; //If diagonal is endangered
        }
    }
    return collisions;
}


/**
 * Takes two parent genes and produces two child genes (in place).
 * Swaps column i whenever either parent has two near-equal adjacent rows there.
 */
void CrossoverFunction(int gene1[N], int gene2[N]) {
    for (int i = 1; i < N; i++) {
        if (abs(gene1[i - 1] - gene1[i]) < 2 || abs(gene2[i - 1] - gene2[i]) < 2) {
            int temp = gene1[i];
            gene1[i] = gene2[i];
            gene2[i] = temp;
        }
    }
}


/**
 * Takes a gene and mutates it: first repairs duplicates so the gene is a
 * permutation of 0..N-1, then swaps one element from each side of a random barrier.
 */
void MutationFunction(int gene[N], unsigned *seed) {
    // Mark which values already appear in the gene (1 = present, 0 = missing)
    int inGene[N] = {0};
    for (int i = 0; i < N; i++) {
        inGene[gene[i]] = 1;
    }
    // Find duplicates and replace them with unused values
    for (int i = 1; i < N; i++) {
        for (int j = 0; j < i; j++) {
            if (gene[i] == gene[j]) {
                for (int k = 0; k < N; k++) {
                    if (inGene[k] == 0) {
                        gene[i] = k;
                        inGene[k] = 1;
                        k = N; // stop scanning: replacement found
                    }
                }
            }
        }
    }
    // Perform the actual swapping across a random barrier
    int barrier = RandomInteger2(1, N - 3, seed);       // [1, N-3]
    int swapA = RandomInteger2(0, barrier, seed);       // [0, barrier]
    int swapB = RandomInteger2(barrier + 1, N - 1, seed); // [barrier+1, N-1]
    int temp = gene[swapA];
    gene[swapA] = gene[swapB];
    gene[swapB] = temp;
}


/**
 * Breeds the next generation: for each pair slot, semi-stochastically picks two
 * low-collision parents, copies them in, then crosses over and mutates them.
 * Requires GENES to be even so every slot of genesNew is filled.
 */
void BreedGeneration(int genes[GENES][N], int utilityValues[GENES], unsigned *seed) {
    // Zero-initialize; every slot is overwritten below because GENES is even.
    // (The old `= {-1}` initializer only set element [0][0] anyway.)
    int genesNew[GENES][N] = {{0}};
    // For all pairs of genes to create...
    for (int i = 0; i < GENES - 1; i += 2) {
        int index1 = -1, index2 = -1;
        float limit_value = INFINITY;
        float value1 = limit_value, value2 = limit_value;
        //...access all current genes and in a semi-stochastic way, pick two low-value parents
        for (int j = 0; j < GENES; j++) {
            float value = (float) (10 + RandomInteger2(10, 20, seed) * utilityValues[j]);
            if (value <= value1) {
                value2 = value1; index2 = index1;
                value1 = value;  index1 = j;
            } else if (value < value2) {
                value2 = value; index2 = j;
            }
        }
        //...then copy the parents to the new array
        for (int k = 0; k < N; k++) {
            genesNew[i][k] = genes[index1][k];
            genesNew[i + 1][k] = genes[index2][k];
        }
        //...breed and mutate their children
        CrossoverFunction(genesNew[i], genesNew[i + 1]);
        MutationFunction(genesNew[i], seed);
        MutationFunction(genesNew[i + 1], seed);
    }
    // Finally copy the new genes into the old ones
    for (int i = 0; i < GENES; i++) {
        for (int j = 0; j < N; j++) {
            genes[i][j] = genesNew[i][j];
        }
    }
}


/**
 * Calculates and stores all current genes' utility values.
 * Returns the index of the fittest (lowest-collision) gene.
 */
unsigned CalculateAllUtilityValues(int genes[GENES][N], int utilityValues[GENES]) {
    int bestUtilityValueFoundAt = 0;
    for (int i = 0; i < GENES; i++) {
        utilityValues[i] = UtilityFunction(genes[i]);
        if (utilityValues[i] < utilityValues[bestUtilityValueFoundAt]) {
            bestUtilityValueFoundAt = i;
        }
    }
    return bestUtilityValueFoundAt;
}


/**
 * Runs the genetic algorithm to solve the problem (one instance per thread).
 * Loops until this thread finds a zero-collision gene or another thread
 * publishes a solution first. Only an actual solver publishes its result,
 * and only if the solution slot is still unclaimed — this prevents losing
 * threads from clobbering the winner's gene (bug in the previous revision).
 * Returns the number of generations this thread bred.
 */
long int Solve(int fittestGene[N], unsigned threadID, int *whoHasFinished, unsigned *solversGenerations) {
    unsigned seed = threadID;
    int genes[GENES][N];
    int utilityValues[GENES] = {1}; // non-zero sentinel so the loop runs at least once

    //Create a random set of genes
    GeneInitialization(genes);
    long int generation = 0;
    unsigned bestGene = 0;
    bool foundSolution = false;

    //While no solution is found by this thread or any other
    while (true) {
        int finished;
        #pragma omp atomic read
        finished = *whoHasFinished; // atomic read: flag is written concurrently by other threads
        if (finished >= 0) break;                      // someone else already solved it
        if (utilityValues[bestGene] == 0) {            // this thread solved it
            foundSolution = true;
            break;
        }
        generation++;
        //...for each repetition create the next generation of genes
        BreedGeneration(genes, utilityValues, &seed);
        //...and calculate all genes' utility values
        bestGene = CalculateAllUtilityValues(genes, utilityValues);
    }

    //Publish the solution only if this thread found one and nobody beat it to it
    #pragma omp critical
    {
        if (foundSolution && *whoHasFinished < 0) {
            *whoHasFinished = (int) threadID;
            *solversGenerations = (unsigned) generation;
            for (int i = 0; i < N; i++)
                fittestGene[i] = genes[bestGene][i];
        }
    }
    return generation;
}


/**
 * The main program: spawns TARGET_THREADS independent GA searches and reports
 * the first solution found, plus the summed generation count across threads.
 */
int main() {
    printf("\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n");
    printf("This program implements my Genetic Algorithm method of solving the \"N-Queens Problem\".\n");
    printf("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n");

    int fittestGene[N] = {0};
    int numberOfThreads = 1, whoHasFinished = -1;
    unsigned solversGenerations = 0;
    long int totalGenerations = 0;

    printf("Queens set at: %d Genes set at: %d\n", N, GENES);
    printf("Now solving the problem. Please wait...\n");

    #pragma omp parallel num_threads(TARGET_THREADS) reduction(+:totalGenerations)
    {
        //Check how many threads were created
        #pragma omp single
        numberOfThreads = omp_get_num_threads();

        //Tell each thread to start searching for a solution
        totalGenerations = Solve(fittestGene, omp_get_thread_num(), &whoHasFinished, &solversGenerations);
    }

    printf("Algorithm completed. Number of threads used: %d Total generations: %ld\n", numberOfThreads, totalGenerations);
    printf("Solution found by thread #%d in #%u generations.\n", whoHasFinished, solversGenerations);
    printf("The solution found is:\n");
    //Map3(fittestGene, N);
    return 0;
}
GB_bitmap_AxB_saxpy_A_bitmap_B_bitmap_template.c
//------------------------------------------------------------------------------
// GB_bitmap_AxB_saxpy_A_bitmap_B_bitmap: C<#M>+=A*B, C bitmap, M any format
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Template body: included into a host function that supplies nthreads, ntasks,
// nI_tasks, avlen, avdim, bvdim, bvlen, the arrays Ab/Bb/Cb, and the semiring
// macros (GB_GET_B_kj, GB_MULT_A_ik_B_kj, GB_CIJ_WRITE, GB_CIJ_UPDATE, ...).
// Work is split into GB_TILE_SIZE x GB_TILE_SIZE tiles of C; each OpenMP task
// computes one tile C(I,J), and per-task entry counts are summed into cnvals
// via the reduction.

{
    int64_t tid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:cnvals)
    for (tid = 0 ; tid < ntasks ; tid++)
    {

        //----------------------------------------------------------------------
        // get the task to compute C (I,J)
        //----------------------------------------------------------------------

        int64_t I_tid = tid / nI_tasks ;
        int64_t J_tid = tid % nI_tasks ;

        // I = istart:iend-1
        int64_t istart = I_tid * GB_TILE_SIZE ;
        int64_t iend = GB_IMIN (avlen, istart + GB_TILE_SIZE) ;

        // J = jstart:jend-1
        int64_t jstart = J_tid * GB_TILE_SIZE ;
        int64_t jend = GB_IMIN (bvdim, jstart + GB_TILE_SIZE) ;

        int64_t task_cnvals = 0 ;   // entries added to this tile of C

        //----------------------------------------------------------------------
        // check if any entry in the M(I,J) mask permits any change to C(I,J)
        //----------------------------------------------------------------------

        // Early-exit scan: if the (possibly complemented) mask forbids every
        // position in the tile, the whole tile can be skipped.

        #if GB_MASK_IS_SPARSE_OR_HYPER || GB_MASK_IS_BITMAP_OR_FULL
        bool any_update_allowed = false ;
        for (int64_t j = jstart ; j < jend && !any_update_allowed ; j++)
        {
            for (int64_t i = istart ; i < iend && !any_update_allowed ; i++)
            {

                //----------------------------------------------------------
                // get pointer to C(i,j) and M(i,j)
                //----------------------------------------------------------

                // C is bitmap, held by column: linear index of C(i,j)
                int64_t pC = j * avlen + i ;

                //----------------------------------------------------------
                // check M(i,j)
                //----------------------------------------------------------

                #if GB_MASK_IS_SPARSE_OR_HYPER
                // M is sparse or hypersparse: M has been scattered into the
                // Cb bitmap; bit 2 of Cb[pC] records M(i,j)
                int8_t cb = Cb [pC] ;
                bool mij = (cb & 2) ;
                #elif GB_MASK_IS_BITMAP_OR_FULL
                // M is bitmap or full
                // NOTE(review): macro presumably declares mij from M at pC —
                // confirm against the template's macro definitions
                GB_GET_M_ij (pC) ;
                #endif
                if (Mask_comp) mij = !mij ;
                if (!mij) continue ;
                any_update_allowed = true ;
            }
        }
        if (!any_update_allowed)
        {
            // C(I,J) cannot be modified at all; skip it
            continue ;
        }
        #endif

        //----------------------------------------------------------------------
        // declare local storage for this task
        //----------------------------------------------------------------------

        // GB_ATYPE Ax_cache [GB_TILE_SIZE * GB_KTILE_SIZE] ;
        // int8_t Ab_cache [GB_TILE_SIZE * GB_KTILE_SIZE] ;
        bool Ab_any_in_row [GB_TILE_SIZE] ;   // row-of-A(I,K) occupancy flags

        //----------------------------------------------------------------------
        // C<#M>(I,J) += A(I,:) * B(:,J)
        //----------------------------------------------------------------------

        // Iterate over the inner dimension in panels of GB_KTILE_SIZE.
        for (int64_t kstart = 0 ; kstart < avdim ; kstart += GB_KTILE_SIZE)
        {

            // K = kstart:kend-1
            int64_t kend = GB_IMIN (avdim, kstart + GB_KTILE_SIZE) ;

            //------------------------------------------------------------------
            // TODO: load A(I,K) into local storage
            //------------------------------------------------------------------

            // For built-in semirings, load A(I,K) into local storage of size
            // GB_TILE_SIZE * GB_KTILE_SIZE and transpose it.  Load in the
            // bitmap Ab if not NULL, and Ax if not NULL.

            #if 0
            for (int64_t k = kstart ; k < kend ; k++)
            {
                for (int64_t i = istart ; i < iend ; i++)
                {
                    int64_t pA = i + k * avlen ;
                    int8_t ab = GBB (Ab, pA) ;
                    i_local = i - istart ;
                    k_local = k - kstart ;
                    Ab_cache [i_local * GB_KTILE_SIZE ...
                }
            }
            #endif

            //------------------------------------------------------------------
            // Check for entries in each row of A(I,K)
            //------------------------------------------------------------------

            // Precompute, per row i of the tile, whether A(i,K) has any entry,
            // so empty rows can be skipped in the innermost loops below.

            if (A_is_bitmap)
            {
                for (int i = 0 ; i < GB_TILE_SIZE ; i++)
                {
                    Ab_any_in_row [i] = false ;
                }
                for (int64_t k = kstart ; k < kend ; k++)
                {
                    for (int64_t i = istart ; i < iend ; i++)
                    {
                        int64_t pA = i + k * avlen ;    // get pointer to A(i,k)
                        int8_t ab = Ab [pA] ;
                        // Ab_cache [(i-istart) * GB_KTILE_SIZE + (k-kstart)]
                        //     = ab ;
                        Ab_any_in_row [i-istart] |= ab ;
                    }
                }
            }

            //------------------------------------------------------------------
            // C<#M>(I,J) += A(I,K) * B(K,J)
            //------------------------------------------------------------------

            for (int64_t j = jstart ; j < jend ; j++)
            {

                //--------------------------------------------------------------
                // B is bitmap or full: check if any B(K,j) entry exists
                //--------------------------------------------------------------

                if (B_is_bitmap)
                {
                    int b = 0 ;
                    for (int64_t k = kstart ; k < kend ; k++)
                    {
                        int64_t pB = k + j * bvlen ;    // pointer to B(k,j)
                        b += Bb [pB] ;
                    }
                    if (b == 0)
                    {
                        // no entry exists in B(K,j)
                        continue ;
                    }
                }

                //--------------------------------------------------------------
                // C<#M>(I,j) += A(I,K) * B(K,j)
                //--------------------------------------------------------------

                GB_GET_T_FOR_SECONDJ ;

                for (int64_t i = istart ; i < iend ; i++)
                {

                    //----------------------------------------------------------
                    // skip if A(i,K) has no entries
                    //----------------------------------------------------------

                    if (A_is_bitmap && !Ab_any_in_row [i - istart])
                    {
                        continue ;
                    }

                    //----------------------------------------------------------
                    // get C(i,j)
                    //----------------------------------------------------------

                    int64_t pC = i + j * avlen ;

                    //----------------------------------------------------------
                    // check M(i,j)
                    //----------------------------------------------------------

                    #if GB_MASK_IS_SPARSE_OR_HYPER
                    // M is sparse or hypersparse: bit 2 of Cb holds M(i,j),
                    // bit 1 holds C(i,j)'s presence
                    int8_t cb = Cb [pC] ;
                    bool mij = (cb & 2) ;
                    if (Mask_comp) mij = !mij ;
                    if (!mij) continue ;
                    cb = (cb & 1) ;
                    #elif GB_MASK_IS_BITMAP_OR_FULL
                    // M is bitmap or full
                    GB_GET_M_ij (pC) ;
                    if (Mask_comp) mij = !mij ;
                    if (!mij) continue ;
                    int8_t cb = Cb [pC] ;
                    #else
                    // no mask
                    int8_t cb = Cb [pC] ;
                    #endif

                    //----------------------------------------------------------
                    // C(i,j) += A(i,K) * B(K,j)
                    //----------------------------------------------------------

                    if (cb == 0)
                    {

                        //------------------------------------------------------
                        // C(i,j) does not yet exist
                        //------------------------------------------------------

                        // First product writes C(i,j); later ones accumulate.
                        for (int64_t k = kstart ; k < kend ; k++)
                        {
                            int64_t pA = i + k * avlen ;    // pointer to A(i,k)
                            int64_t pB = k + j * bvlen ;    // pointer to B(k,j)
                            if (!GBB (Ab, pA)) continue ;
                            if (!GBB (Bb, pB)) continue ;
                            GB_GET_B_kj ;                   // get B(k,j)
                            GB_MULT_A_ik_B_kj ;             // t = A(i,k)*B(k,j)
                            if (cb == 0)
                            {
                                // C(i,j) = A(i,k) * B(k,j)
                                GB_CIJ_WRITE (pC, t) ;
                                Cb [pC] = keep ;
                                cb = keep ;
                                task_cnvals++ ;
                            }
                            else
                            {
                                // C(i,j) += A(i,k) * B(k,j)
                                GB_CIJ_UPDATE (pC, t) ;
                            }
                        }

                    }
                    else
                    {

                        //------------------------------------------------------
                        // C(i,j) already exists
                        //------------------------------------------------------

                        // ANY_PAIR semiring: the value can never change once
                        // present, so the numeric work is compiled out.
                        #if !GB_IS_ANY_PAIR_SEMIRING
                        for (int64_t k = kstart ; k < kend ; k++)
                        {
                            int64_t pA = i + k * avlen ;    // pointer to A(i,k)
                            int64_t pB = k + j * bvlen ;    // pointer to B(k,j)
                            if (!GBB (Ab, pA)) continue ;
                            if (!GBB (Bb, pB)) continue ;
                            GB_GET_B_kj ;                   // get B(k,j)
                            GB_MULT_A_ik_B_kj ;             // t = A(i,k)*B(k,j)
                            // C(i,j) += A(i,k) * B(k,j)
                            GB_CIJ_UPDATE (pC, t) ;
                        }
                        #endif
                    }
                }
            }
        }
        cnvals += task_cnvals ;
    }
}
residual_criteria.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // #if !defined(KRATOS_RESIDUAL_CRITERIA ) #define KRATOS_RESIDUAL_CRITERIA // System includes // External includes // Project includes #include "includes/model_part.h" #include "includes/define.h" #include "solving_strategies/convergencecriterias/convergence_criteria.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class ResidualCriteria * @ingroup KratosCore * @brief This is a convergence criteria that employes the residual as criteria * @details The reactions from the RHS are not computed in the residual * @author Riccardo Rossi */ template<class TSparseSpace, class TDenseSpace > class ResidualCriteria : public ConvergenceCriteria< TSparseSpace, TDenseSpace > { public: ///@name Type Definitions ///@{ KRATOS_CLASS_POINTER_DEFINITION( ResidualCriteria ); /// The definition of the base ConvergenceCriteria typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType; /// The data type typedef typename BaseType::TDataType TDataType; /// The dofs array type typedef typename BaseType::DofsArrayType DofsArrayType; /// The sparse matrix type typedef typename BaseType::TSystemMatrixType TSystemMatrixType; /// The dense vector type typedef typename BaseType::TSystemVectorType TSystemVectorType; /// Definition of the IndexType typedef std::size_t IndexType; /// Definition of the size type typedef std::size_t SizeType; ///@} ///@name Life Cycle ///@{ //* Constructor. 
explicit ResidualCriteria(Kratos::Parameters Settings) : BaseType() { if (Settings.Has("residual_absolute_tolerance")) { mAlwaysConvergedNorm = Settings["residual_absolute_tolerance"].GetDouble(); } else if (Settings.Has("absolute_tolerance")) { mAlwaysConvergedNorm = Settings["absolute_tolerance"].GetDouble(); } else { KRATOS_WARNING("ResidualCriteria") << "residual_absolute_tolerance or absolute_tolerance nor defined on settings. Using default 1.0e-9" << std::endl; mAlwaysConvergedNorm = 1.0e-9; } if (Settings.Has("residual_relative_tolerance")) { mRatioTolerance = Settings["residual_relative_tolerance"].GetDouble(); } else if (Settings.Has("relative_tolerance")) { mRatioTolerance = Settings["relative_tolerance"].GetDouble(); } else { KRATOS_WARNING("ResidualCriteria") << "residual_relative_tolerance or relative_tolerance nor defined on settings. Using default 1.0e-4" << std::endl; mRatioTolerance = 1.0e-4; } this->mActualizeRHSIsNeeded = true; } //* Constructor. explicit ResidualCriteria( TDataType NewRatioTolerance, TDataType AlwaysConvergedNorm) : BaseType(), mRatioTolerance(NewRatioTolerance), mAlwaysConvergedNorm(AlwaysConvergedNorm) { this->mActualizeRHSIsNeeded = true; } //* Copy constructor. explicit ResidualCriteria( ResidualCriteria const& rOther ) :BaseType(rOther) ,mRatioTolerance(rOther.mRatioTolerance) ,mInitialResidualNorm(rOther.mInitialResidualNorm) ,mCurrentResidualNorm(rOther.mCurrentResidualNorm) ,mAlwaysConvergedNorm(rOther.mAlwaysConvergedNorm) ,mReferenceDispNorm(rOther.mReferenceDispNorm) { this->mActualizeRHSIsNeeded = true; } //* Destructor. ~ResidualCriteria() override {} ///@} ///@name Operators ///@{ /** * @brief Criterias that need to be called after getting the solution * @details Compute relative and absolute error. * @param rModelPart Reference to the ModelPart containing the problem. 
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual + reactions) * @return true if convergence is achieved, false otherwise */ bool PostCriteria( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { const SizeType size_b = TSparseSpace::Size(rb); if (size_b != 0) { //if we are solving for something SizeType size_residual; CalculateResidualNorm(rModelPart, mCurrentResidualNorm, size_residual, rDofSet, rb); TDataType ratio = 0.0; if(mInitialResidualNorm < std::numeric_limits<TDataType>::epsilon()) { ratio = 0.0; } else { ratio = mCurrentResidualNorm/mInitialResidualNorm; } const TDataType float_size_residual = static_cast<TDataType>(size_residual); const TDataType absolute_norm = (mCurrentResidualNorm/float_size_residual); KRATOS_INFO_IF("RESIDUAL CRITERION", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << " :: [ Initial residual norm = " << mInitialResidualNorm << "; Current residual norm = " << mCurrentResidualNorm << "]" << std::endl; KRATOS_INFO_IF("RESIDUAL CRITERION", this->GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) << " :: [ Obtained ratio = " << ratio << "; Expected ratio = " << mRatioTolerance << "; Absolute norm = " << absolute_norm << "; Expected norm = " << mAlwaysConvergedNorm << "]" << std::endl; rModelPart.GetProcessInfo()[CONVERGENCE_RATIO] = ratio; rModelPart.GetProcessInfo()[RESIDUAL_NORM] = absolute_norm; if (ratio <= mRatioTolerance || absolute_norm < mAlwaysConvergedNorm) { KRATOS_INFO_IF("RESIDUAL CRITERION", this->GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) << "Convergence is achieved" << std::endl; return true; } else { return false; } } else { return true; } } /** * @brief This function 
initialize the convergence criteria * @param rModelPart Reference to the ModelPart containing the problem. (unused) */ void Initialize(ModelPart& rModelPart) override { BaseType::Initialize(rModelPart); } /** * @brief This function initializes the solution step * @param rModelPart Reference to the ModelPart containing the problem. * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rA System matrix (unused) * @param rDx Vector of results (variations on nodal variables) * @param rb RHS vector (residual + reactions) */ void InitializeSolutionStep( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { BaseType::InitializeSolutionStep(rModelPart, rDofSet, rA, rDx, rb); // Filling mActiveDofs when MPC exist if (rModelPart.NumberOfMasterSlaveConstraints() > 0) { mActiveDofs.resize(rDofSet.size()); #pragma omp parallel for for(int i=0; i<static_cast<int>(mActiveDofs.size()); ++i) { mActiveDofs[i] = true; } #pragma omp parallel for for (int i = 0; i<static_cast<int>(rDofSet.size()); ++i) { const auto it_dof = rDofSet.begin() + i; if (it_dof->IsFixed()) { mActiveDofs[it_dof->EquationId()] = false; } } for (const auto& r_mpc : rModelPart.MasterSlaveConstraints()) { for (const auto& r_dof : r_mpc.GetMasterDofsVector()) { mActiveDofs[r_dof->EquationId()] = false; } for (const auto& r_dof : r_mpc.GetSlaveDofsVector()) { mActiveDofs[r_dof->EquationId()] = false; } } } SizeType size_residual; CalculateResidualNorm(rModelPart, mInitialResidualNorm, size_residual, rDofSet, rb); } /** * @brief This function finalizes the solution step * @param rModelPart Reference to the ModelPart containing the problem. 
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param A System matrix (unused) * @param Dx Vector of results (variations on nodal variables) * @param b RHS vector (residual + reactions) */ void FinalizeSolutionStep( ModelPart& rModelPart, DofsArrayType& rDofSet, const TSystemMatrixType& rA, const TSystemVectorType& rDx, const TSystemVectorType& rb ) override { BaseType::FinalizeSolutionStep(rModelPart, rDofSet, rA, rDx, rb); } ///@} ///@name Operations ///@{ ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "ResidualCriteria"; } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << Info(); } /// Print object's data. void PrintData(std::ostream& rOStream) const override { rOStream << Info(); } ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /** * @brief This method computes the norm of the residual * @details It checks if the dof is fixed * @param rModelPart Reference to the ModelPart containing the problem. 
* @param rResidualSolutionNorm The norm of the residual * @param rDofNum The number of DoFs * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver) * @param rb RHS vector (residual + reactions) */ virtual void CalculateResidualNorm( ModelPart& rModelPart, TDataType& rResidualSolutionNorm, SizeType& rDofNum, DofsArrayType& rDofSet, const TSystemVectorType& rb ) { // Initialize TDataType residual_solution_norm = TDataType(); SizeType dof_num = 0; // Auxiliar values TDataType residual_dof_value = 0.0; const auto it_dof_begin = rDofSet.begin(); const int number_of_dof = static_cast<int>(rDofSet.size()); // Loop over Dofs if (rModelPart.NumberOfMasterSlaveConstraints() > 0) { #pragma omp parallel for firstprivate(residual_dof_value) reduction(+:residual_solution_norm, dof_num) for (int i = 0; i < number_of_dof; i++) { auto it_dof = it_dof_begin + i; const IndexType dof_id = it_dof->EquationId(); if (mActiveDofs[dof_id]) { residual_dof_value = TSparseSpace::GetValue(rb,dof_id); residual_solution_norm += std::pow(residual_dof_value, 2); dof_num++; } } } else { #pragma omp parallel for firstprivate(residual_dof_value) reduction(+:residual_solution_norm, dof_num) for (int i = 0; i < number_of_dof; i++) { auto it_dof = it_dof_begin + i; if (!it_dof->IsFixed()) { const IndexType dof_id = it_dof->EquationId(); residual_dof_value = TSparseSpace::GetValue(rb,dof_id); residual_solution_norm += std::pow(residual_dof_value, 2); dof_num++; } } } rDofNum = dof_num; rResidualSolutionNorm = std::sqrt(residual_solution_norm); } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ TDataType mRatioTolerance; /// The ratio threshold for the norm of the residual TDataType mInitialResidualNorm; /// The reference norm of the residual TDataType mCurrentResidualNorm; /// The current 
norm of the residual TDataType mAlwaysConvergedNorm; /// The absolute value threshold for the norm of the residual TDataType mReferenceDispNorm; /// The norm at the beginning of the iterations std::vector<bool> mActiveDofs; /// This vector contains the dofs that are active ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; // Class ClassName ///@} ///@name Type Definitions ///@{ ///@} } // namespace Kratos. #endif // KRATOS_NEW_DISPLACEMENT_CRITERIA defined
GB_binop__first_int32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__first_int32)
// A.*B function (eWiseMult):       GB (_AemultB_08__first_int32)
// A.*B function (eWiseMult):       GB (_AemultB_02__first_int32)
// A.*B function (eWiseMult):       GB (_AemultB_04__first_int32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__first_int32)
// A*D function (colscale):         GB (_AxD__first_int32)
// D*A function (rowscale):         GB (_DxB__first_int32)
// C+=B function (dense accum):     GB (_Cdense_accumB__first_int32)
// C+=b function (dense accum):     GB (_Cdense_accumb__first_int32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__first_int32)
// C=scalar+B                       GB ((none))
// C=scalar+B'                      GB ((none))
// C=A+scalar                       GB ((none))
// C=A'+scalar                      GB ((none))

// C type:   int32_t
// A type:   int32_t
// B,b type: int32_t
// BinaryOp: cij = aij
// (FIRST operator: the result is the A value; B values are never read)

#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int32_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
// (intentionally a no-op: FIRST ignores the B value)
#define GB_GETB(bij,Bx,pB,B_iso) \
    ;

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = x ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_FIRST || GxB_NO_INT32 || GxB_NO_FIRST_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (FIRST does not qualify, so this variant is compiled out.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__first_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__first_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__first_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__first_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__first_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__first_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__first_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__first_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__first_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__first_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// (compiled out: bind1st of FIRST is a constant, handled generically)

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t   x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        ;
        ;
        Cx [p] = x ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t   y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int32_t aij = GBX (Ax, p, false) ;
        Cx [p] = aij ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{                          \
    ; ;                    \
    Cx [pC] = x ;          \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
}

#endif

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    int32_t aij = GBX (Ax, pA, false) ;     \
    Cx [pC] = aij ;                         \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
OPENMP_bail_out.c
/**********************************************************************

Name:      bail_out

Purpose:   Terminate the program gracefully when any OpenMP thread has
           detected an error inside a parallel region.

Arguments: error code (zero means "no error").

Returns:   nothing; the process exits with a nonzero status if the
           code is nonzero.

Notes:     Must be called by every thread of the team.  The barrier
           guarantees that all threads have finished publishing their
           error status before any of them acts on it.  Atomicity when
           several threads update the shared error variable is not
           required here: callers only care whether the value ended up
           nonzero, not which particular thread set it.

History:   Written by Rob Van der Wijngaart, July 2006

**********************************************************************/

#include <par-res-kern_general.h>

void bail_out(int error)
{
  /* rendezvous point: wait for the whole team before checking */
  #pragma omp barrier
  /* a nonzero code on this thread aborts the entire program */
  if (error) exit(EXIT_FAILURE);
}
omp_threadprivate.c
// RUN: %libomp-compile-and-run /* * Threadprivate is tested in 2 ways: * 1. The global variable declared as threadprivate should have * local copy for each thread. Otherwise race condition and * wrong result. * 2. If the value of local copy is retained for the two adjacent * parallel regions */ #include "omp_testsuite.h" #include <stdlib.h> #include <stdio.h> static int sum0=0; static int myvalue = 0; #pragma omp threadprivate(sum0) #pragma omp threadprivate(myvalue) int test_omp_threadprivate() { int sum = 0; int known_sum; int i; int iter; int *data; int size; int num_failed = 0; int my_random; omp_set_dynamic(0); #pragma omp parallel private(i) { sum0 = 0; #pragma omp for for (i = 1; i <= LOOPCOUNT; i++) { sum0 = sum0 + i; } /*end of for*/ #pragma omp critical { sum = sum + sum0; } /*end of critical */ } /* end of parallel */ known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2; if (known_sum != sum ) { fprintf (stderr, " known_sum = %d, sum = %d\n", known_sum, sum); } /* the next parallel region is just used to get the number of threads*/ omp_set_dynamic(0); #pragma omp parallel { #pragma omp master { size=omp_get_num_threads(); data=(int*) malloc(size*sizeof(int)); } }/* end parallel*/ srand(45); for (iter = 0; iter < 100; iter++) { my_random = rand(); /* random number generator is called inside serial region*/ /* the first parallel region is used to initialiye myvalue and the array with my_random+rank */ #pragma omp parallel { int rank; rank = omp_get_thread_num (); myvalue = data[rank] = my_random + rank; } /* the second parallel region verifies that the value of "myvalue" is retained */ #pragma omp parallel reduction(+:num_failed) { int rank; rank = omp_get_thread_num (); num_failed = num_failed + (myvalue != data[rank]); if(myvalue != data[rank]) { fprintf (stderr, " myvalue = %d, data[rank]= %d\n", myvalue, data[rank]); } } } free (data); return (known_sum == sum) && !num_failed; } /* end of check_threadprivate*/ int main() { int i; int num_failed=0; for(i = 0; 
i < REPETITIONS; i++) { if(!test_omp_threadprivate()) { num_failed++; } } return num_failed; }
saxpy.c
/** * @file saxpy.c * * @mainpage saxpy * * @author Xin Wu (PC²) * @date 05.04.2020 * @copyright CC BY-SA 2.0 * * saxpy performs the \c saxpy operation on host as well as accelerator. * The performance (in MB/s) for different implementations is also compared. * * The \c saxpy operation is defined as: * * y := a * x + y * * where: * * - a is a scalar. * - x and y are single-precision vectors each with n elements. */ #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <omp.h> #include "hsaxpy.h" #include "asaxpy.h" #include "check1ns.h" #include "wtcalc.h" #define TWO26 (1 << 26) #define NLUP (32) /** * @brief Main entry point for saxpy. */ int main(int argc, char *argv[]) { int i, n, iret, ial; size_t nbytes; float a = 2.0f, *x, *y, *yhost, *yaccl, maxabserr; struct timespec rt[2]; double wt; // walltime /* * We need 1 ns time resolution. */ check1ns(); printf("The system supports 1 ns time resolution\n"); /* * check the number of accelerators */ if (0 == omp_get_num_devices()) { printf("No accelerator found ... 
exit\n"); exit(EXIT_FAILURE); } /* * preparation */ n = TWO26; nbytes = sizeof(float) * n; iret = 0; if (NULL == (x = (float *) malloc(nbytes))) iret = -1; if (NULL == (y = (float *) malloc(nbytes))) iret = -1; if (NULL == (yhost = (float *) malloc(nbytes))) iret = -1; if (NULL == (yaccl = (float *) malloc(nbytes))) iret = -1; if (0 != iret) { printf("error: memory allocation\n"); free(x); free(y); free(yhost); free(yaccl); exit(EXIT_FAILURE); } #pragma omp parallel for default(none) \ shared(a, x, y, yhost, yaccl, n) private(i) for (i = 0; i < n; ++i) { x[i] = rand() % 32 / 32.0f; y[i] = rand() % 32 / 32.0f; yhost[i] = a * x[i] + y[i]; // yhost will be used as reference value yaccl[i] = 0.0f; } printf("total size of x and y is %9.1f MB\n", 2.0 * nbytes / (1 << 20)); printf("tests are averaged over %2d loops\n", NLUP); /* * saxpy on host */ /* * See hsaxpy.c for details: */ memcpy(yaccl, y, nbytes); wtcalc = -1.0; // skip 1st run for timing hsaxpy(n, a, x, yaccl); // check yaccl maxabserr = -1.0f; for (i = 0; i < n; ++i) { maxabserr = fabsf(yaccl[i] - yhost[i]) > maxabserr? fabsf(yaccl[i] - yhost[i]) : maxabserr; } // skip 2nd run for timing hsaxpy(n, a, x, yaccl); // timing : start wtcalc = 0.0; clock_gettime(CLOCK_REALTIME, rt + 0); for (int ilup = 0; ilup < 1; ++ilup) { hsaxpy(n, a, x, yaccl); } clock_gettime(CLOCK_REALTIME, rt + 1); wt=(rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec); printf("saxpy on host: %9.1f MB/s %9.1f MB/s maxabserr = %9.1f\n", 3.0 * nbytes / ((1 << 20) * wt), 3.0 * nbytes / ((1 << 20) * wtcalc), maxabserr); /* * saxpy on accl */ for (ial = 0; ial < 6; ++ial) { /* * See asaxpy.c for details: * * ial: * * 0: <<<2^7 , 2^7 >>>, auto scheduling * 1: <<<2^16, 2^10>>>, manual scheduling * 2: <<<2^15, 2^7 >>>, manual scheduling, 16x loop unrolling (2^15*2^7*16==2^26) * 3: <<<2^12, 2^7 >>>, auto scheduling, 16x loop unrolling * 4: de-linearize the vector and then collapse the ji-loop. 
* otherwise: cublasSaxpy in CUBLAS */ memcpy(yaccl, y, nbytes); wtcalc = -1.0; // skip 1st run for timing asaxpy(n, a, x, yaccl, ial); // check yaccl maxabserr = -1.0f; for (i = 0; i < n; ++i) { maxabserr = fabsf(yaccl[i] - yhost[i]) > maxabserr? fabsf(yaccl[i] - yhost[i]) : maxabserr; } // skip 2nd run for timing asaxpy(n, a, x, yaccl, ial); // timing : start wtcalc = 0.0; clock_gettime(CLOCK_REALTIME, rt + 0); for (int ilup = 0; ilup < NLUP; ++ilup) { asaxpy(n, a, x, yaccl, ial); } clock_gettime(CLOCK_REALTIME, rt + 1); wt=(rt[1].tv_sec - rt[0].tv_sec) + 1.0e-9 * (rt[1].tv_nsec - rt[0].tv_nsec); printf("saxpy on accl (impl. %d)\ntotal: %9.1f MB/s kernel: %9.1f MB/s maxabserr = %9.1f\n\n", ial, NLUP * 3.0 * nbytes / ((1 << 20) * wt), NLUP * 3.0 * nbytes / ((1 << 20) * wtcalc), maxabserr); } /* * release memory */ free(x); free(y); free(yhost); free(yaccl); return 0; }
GB_binop__lxor_fp32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__lxor_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_08__lxor_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_02__lxor_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_04__lxor_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__lxor_fp32)
// A*D function (colscale):         GB (_AxD__lxor_fp32)
// D*A function (rowscale):         GB (_DxB__lxor_fp32)
// C+=B function (dense accum):     GB (_Cdense_accumB__lxor_fp32)
// C+=b function (dense accum):     GB (_Cdense_accumb__lxor_fp32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__lxor_fp32)
// C=scalar+B                       GB (_bind1st__lxor_fp32)
// C=scalar+B'                      GB (_bind1st_tran__lxor_fp32)
// C=A+scalar                       GB (_bind2nd__lxor_fp32)
// C=A'+scalar                      GB (_bind2nd_tran__lxor_fp32)

// C type:   float
// A type:   float
// A pattern? 0
// B type:   float
// B pattern? 0

// BinaryOp: cij = ((aij != 0) != (bij != 0))

// The macros below configure the generic template files #include'd by the
// functions further down; the templates read these names to specialize the
// loops for the LXOR operator on float operands.

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    float

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
// GBX appears to be the typed/iso-aware array read macro from GB.h.
#define GB_GETA(aij,Ax,pA,A_iso) \
    float aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
// NOTE(review): the trailing backslash after the 0 continues this macro onto
// the following (blank) line — a generator artifact; harmless but fragile.
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    float bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
// NOTE(review): same trailing-backslash generator artifact as above.
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    float t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: logical xor over float operands — any nonzero value is
// treated as "true", and the result is written as a float 0/1 comparison.
#define GB_BINOP(z,x,y,i,j) \
    z = ((x != 0) != (y != 0)) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (the GxB_NO_* symbols come from GB_control.h and allow building without
// this specialized kernel).
#define GB_DISABLE \
    (GxB_NO_LXOR || GxB_NO_FP32 || GxB_NO_LXOR_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Disabled variant (inside the #if 0 above): LXOR is not one of the
// accumulable ops listed, so no dense C += A+B kernel is generated.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The actual loops live in the included template, specialized by the GB_*
// macros defined at the top of this file.
void GB (_Cdense_ewise3_noaccum__lxor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__lxor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    // Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE),
    // which tells the caller to fall back to the generic implementation.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__lxor_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above already returned.
    // Generator artifact; harmless.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__lxor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx is read by the included template; the name is contractual.
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__lxor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__lxor_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read by the template when is_eWiseUnion is true;
    // they substitute for missing entries of A and B respectively.
    float alpha_scalar ;
    float beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((float *) alpha_scalar_in)) ;
        beta_scalar = (*((float *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__lxor_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__lxor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is 0 for LXOR (commutative), so only the #else branch
    // below is compiled.
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__lxor_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__lxor_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Computes Cx [p] = lxor (x, Bx [p]) for all bnz entries, in parallel.
GrB_Info GB (_bind1st__lxor_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // presence bitmap of B, or NULL (see GBB)
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        float bij = GBX (Bx, p, false) ;
        Cx [p] = ((x != 0) != (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Mirror of bind1st: the scalar is the second operand instead of the first.
GrB_Info GB (_bind2nd__lxor_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // presence bitmap of A, or NULL (see GBB)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        float aij = GBX (Ax, p, false) ;
        Cx [p] = ((aij != 0) != (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    float aij = GBX (Ax, pA, false) ; \
    Cx [pC] = ((x != 0) != (aij != 0)) ; \
}

GrB_Info GB (_bind1st_tran__lxor_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((aij != 0) != (y != 0)) ; \ } GrB_Info GB (_bind2nd_tran__lxor_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_binop__land_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__land_int8) // A.*B function (eWiseMult): GB (_AemultB_08__land_int8) // A.*B function (eWiseMult): GB (_AemultB_02__land_int8) // A.*B function (eWiseMult): GB (_AemultB_04__land_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__land_int8) // A*D function (colscale): GB (_AxD__land_int8) // D*A function (rowscale): GB (_DxB__land_int8) // C+=B function (dense accum): GB (_Cdense_accumB__land_int8) // C+=b function (dense accum): GB (_Cdense_accumb__land_int8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_int8) // C=scalar+B GB (_bind1st__land_int8) // C=scalar+B' GB (_bind1st_tran__land_int8) // C=A+scalar GB (_bind2nd__land_int8) // C=A'+scalar GB (_bind2nd_tran__land_int8) // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = ((aij != 0) && (bij != 0)) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical 
#define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int8_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int8_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = ((x != 0) && (y != 0)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LAND || GxB_NO_INT8 || GxB_NO_LAND_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// Disabled variant (inside the #if 0 above): LAND is not one of the
// accumulable ops listed, so no dense C += A+B kernel is generated.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The actual loops live in the included template, specialized by the GB_*
// macros defined at the top of this file.  Returns GrB_NO_VALUE when this
// kernel is compiled out (GB_DISABLE), telling the caller to use the
// generic fallback.
GrB_Info GB (_Cdense_ewise3_noaccum__land_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__land_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__land_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above already returned.
    // Generator artifact; harmless.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__land_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    bool A_is_pattern,
    const GrB_Matrix D,
    bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx is read by the included template; the name is contractual.
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__land_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    bool D_is_pattern,
    const GrB_Matrix B,
    bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__land_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__land_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__land_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is 0 for LAND (commutative), so only the #else branch
    // below is compiled.
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__land_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__land_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Computes Cx [p] = land (x, Bx [p]) for all bnz entries, in parallel.
GrB_Info GB (_bind1st__land_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // presence bitmap of B, or NULL (see GBB)
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = ((x != 0) && (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Mirror of bind1st: the scalar is the second operand instead of the first.
GrB_Info GB (_bind2nd__land_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // presence bitmap of A, or NULL (see GBB)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int8_t aij = GBX (Ax, p, false) ;
        Cx [p] = ((aij != 0) && (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = ((x != 0) && (aij != 0)) ; \
}

GrB_Info GB (_bind1st_tran__land_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((aij != 0) && (y != 0)) ; \ } GrB_Info GB (_bind2nd_tran__land_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
morphology.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y % % MM MM O O R R P P H H O O L O O G Y Y % % M M M O O RRRR PPPP HHHHH O O L O O G GGG Y % % M M O O R R P H H O O L O O G G Y % % M M OOO R R P H H OOO LLLLL OOO GGG Y % % % % % % MagickCore Morphology Methods % % % % Software Design % % Anthony Thyssen % % January 2010 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Morphology is the application of various kernels, of any size or shape, to an % image in various ways (typically binary, but not always). % % Convolution (weighted sum or average) is just one specific type of % morphology. Just one that is very common for image bluring and sharpening % effects. Not only 2D Gaussian blurring, but also 2-pass 1D Blurring. % % This module provides not only a general morphology function, and the ability % to apply more advanced or iterative morphologies, but also functions for the % generation of many different types of kernel arrays from user supplied % arguments. Prehaps even the generation of a kernel from a small image. */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/linked-list.h" #include "MagickCore/list.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor-private.h" #include "MagickCore/morphology.h" #include "MagickCore/morphology-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/prepress.h" #include "MagickCore/quantize.h" #include "MagickCore/resource_.h" #include "MagickCore/registry.h" #include "MagickCore/semaphore.h" #include "MagickCore/splay-tree.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" /* Other global definitions used by module. 
*/ #define Minimize(assign,value) assign=MagickMin(assign,value) #define Maximize(assign,value) assign=MagickMax(assign,value) /* Integer Factorial Function - for a Binomial kernel */ #if 1 static inline size_t fact(size_t n) { size_t f,l; for(f=1, l=2; l <= n; f=f*l, l++); return(f); } #elif 1 /* glibc floating point alternatives */ #define fact(n) ((size_t)tgamma((double)n+1)) #else #define fact(n) ((size_t)lgamma((double)n+1)) #endif /* Currently these are only internal to this module */ static void CalcKernelMetaData(KernelInfo *), ExpandMirrorKernelInfo(KernelInfo *), ExpandRotateKernelInfo(KernelInfo *, const double), RotateKernelInfo(KernelInfo *, double); /* Quick function to find last kernel in a kernel list */ static inline KernelInfo *LastKernelInfo(KernelInfo *kernel) { while (kernel->next != (KernelInfo *) NULL) kernel=kernel->next; return(kernel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireKernelInfo() takes the given string (generally supplied by the % user) and converts it into a Morphology/Convolution Kernel. This allows % users to specify a kernel from a number of pre-defined kernels, or to fully % specify their own kernel for a specific Convolution or Morphology % Operation. % % The kernel so generated can be any rectangular array of floating point % values (doubles) with the 'control point' or 'pixel being affected' % anywhere within that array of values. % % Previously IM was restricted to a square of odd size using the exact % center as origin, this is no longer the case, and any rectangular kernel % with any value being declared the origin. This in turn allows the use of % highly asymmetrical kernels. 
%
%  The floating point values in the kernel can also include a special value
%  known as 'nan' or 'not a number' to indicate that this value is not part
%  of the kernel array.  This allows you to shape the kernel within its
%  rectangular area.  That is 'nan' values provide a 'mask' for the kernel
%  shape.  However at least one non-nan value must be provided for correct
%  working of a kernel.
%
%  The returned kernel should be freed using the DestroyKernelInfo() when you
%  are finished with it.  Do not free this memory yourself.
%
%  Input kernel definition strings can consist of any of three types.
%
%    "name:args[@><]"
%         Select from one of the built in kernels, using the name and
%         geometry arguments supplied.  See AcquireKernelBuiltIn()
%
%    "WxH[+X+Y][@><]:num, num, num ..."
%         a kernel of size W by H, with W*H floating point numbers following.
%         the 'center' can optionally be defined at +X+Y (such that +0+0
%         is top left corner). If not defined the pixel in the center, for
%         odd sizes, or to the immediate top or left of center for even sizes
%         is automatically selected.
%
%    "num, num, num, num, ..."
%         list of floating point numbers defining an 'old style' odd sized
%         square kernel.  At least 9 values should be provided for a 3x3
%         square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc.
%         Values can be space or comma separated.  This is not recommended.
%
%  You can define a 'list of kernels' which can be used by some morphology
%  operators.  A list is defined as a semi-colon separated list of kernels.
%
%     " kernel ; kernel ; kernel ; "
%
%  Any extra ';' characters, at start, end or between kernel definitions are
%  simply ignored.
%
%  The special flags will expand a single kernel, into a list of rotated
%  kernels. A '@' flag will expand a 3x3 kernel into a list of 45-degree
%  cyclic rotations, while a '>' will generate a list of 90-degree rotations.
% The '<' also exands using 90-degree rotates, but giving a 180-degree % reflected kernel before the +/- 90-degree rotations, which can be important % for Thinning operations. % % Note that 'name' kernels will start with an alphabetic character while the % new kernel specification has a ':' character in its specification string. % If neither is the case, it is assumed an old style of a simple list of % numbers generating a odd-sized square kernel has been given. % % The format of the AcquireKernal method is: % % KernelInfo *AcquireKernelInfo(const char *kernel_string) % % A description of each parameter follows: % % o kernel_string: the Morphology/Convolution kernel wanted. % */ /* This was separated so that it could be used as a separate ** array input handling function, such as for -color-matrix */ static KernelInfo *ParseKernelArray(const char *kernel_string) { KernelInfo *kernel; char token[MagickPathExtent]; const char *p, *end; register ssize_t i; double nan = sqrt((double)-1.0); /* Special Value : Not A Number */ MagickStatusType flags; GeometryInfo args; kernel=(KernelInfo *) AcquireQuantumMemory(1,sizeof(*kernel)); if (kernel == (KernelInfo *) NULL) return(kernel); (void) ResetMagickMemory(kernel,0,sizeof(*kernel)); kernel->minimum = kernel->maximum = kernel->angle = 0.0; kernel->negative_range = kernel->positive_range = 0.0; kernel->type = UserDefinedKernel; kernel->next = (KernelInfo *) NULL; kernel->signature=MagickCoreSignature; if (kernel_string == (const char *) NULL) return(kernel); /* find end of this specific kernel definition string */ end = strchr(kernel_string, ';'); if ( end == (char *) NULL ) end = strchr(kernel_string, '\0'); /* clear flags - for Expanding kernel lists thorugh rotations */ flags = NoValue; /* Has a ':' in argument - New user kernel specification FUTURE: this split on ':' could be done by StringToken() */ p = strchr(kernel_string, ':'); if ( p != (char *) NULL && p < end) { /* ParseGeometry() needs the geometry separated! 
-- Arrgghh */ memcpy(token, kernel_string, (size_t) (p-kernel_string)); token[p-kernel_string] = '\0'; SetGeometryInfo(&args); flags = ParseGeometry(token, &args); /* Size handling and checks of geometry settings */ if ( (flags & WidthValue) == 0 ) /* if no width then */ args.rho = args.sigma; /* then width = height */ if ( args.rho < 1.0 ) /* if width too small */ args.rho = 1.0; /* then width = 1 */ if ( args.sigma < 1.0 ) /* if height too small */ args.sigma = args.rho; /* then height = width */ kernel->width = (size_t)args.rho; kernel->height = (size_t)args.sigma; /* Offset Handling and Checks */ if ( args.xi < 0.0 || args.psi < 0.0 ) return(DestroyKernelInfo(kernel)); kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi : (ssize_t) (kernel->width-1)/2; kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi : (ssize_t) (kernel->height-1)/2; if ( kernel->x >= (ssize_t) kernel->width || kernel->y >= (ssize_t) kernel->height ) return(DestroyKernelInfo(kernel)); p++; /* advance beyond the ':' */ } else { /* ELSE - Old old specification, forming odd-square kernel */ /* count up number of values given */ p=(const char *) kernel_string; while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\'')) p++; /* ignore "'" chars for convolve filter usage - Cristy */ for (i=0; p < end; i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); } /* set the size of the kernel - old sized square */ kernel->width = kernel->height= (size_t) sqrt((double) i+1.0); kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; p=(const char *) kernel_string; while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\'')) p++; /* ignore "'" chars for convolve filter usage - Cristy */ } /* Read in the kernel values from rest of input string argument */ kernel->values=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory( kernel->width,kernel->height*sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) 
return(DestroyKernelInfo(kernel)); kernel->minimum=MagickMaximumValue; kernel->maximum=(-MagickMaximumValue); kernel->negative_range = kernel->positive_range = 0.0; for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); if ( LocaleCompare("nan",token) == 0 || LocaleCompare("-",token) == 0 ) { kernel->values[i] = nan; /* this value is not part of neighbourhood */ } else { kernel->values[i] = StringToDouble(token,(char **) NULL); ( kernel->values[i] < 0) ? ( kernel->negative_range += kernel->values[i] ) : ( kernel->positive_range += kernel->values[i] ); Minimize(kernel->minimum, kernel->values[i]); Maximize(kernel->maximum, kernel->values[i]); } } /* sanity check -- no more values in kernel definition */ GetNextToken(p,&p,MagickPathExtent,token); if ( *token != '\0' && *token != ';' && *token != '\'' ) return(DestroyKernelInfo(kernel)); #if 0 /* this was the old method of handling a incomplete kernel */ if ( i < (ssize_t) (kernel->width*kernel->height) ) { Minimize(kernel->minimum, kernel->values[i]); Maximize(kernel->maximum, kernel->values[i]); for ( ; i < (ssize_t) (kernel->width*kernel->height); i++) kernel->values[i]=0.0; } #else /* Number of values for kernel was not enough - Report Error */ if ( i < (ssize_t) (kernel->width*kernel->height) ) return(DestroyKernelInfo(kernel)); #endif /* check that we recieved at least one real (non-nan) value! 
*/ if (kernel->minimum == MagickMaximumValue) return(DestroyKernelInfo(kernel)); if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel size */ ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */ else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */ ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */ else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */ ExpandMirrorKernelInfo(kernel); /* 90 degree mirror rotate */ return(kernel); } static KernelInfo *ParseKernelName(const char *kernel_string, ExceptionInfo *exception) { char token[MagickPathExtent]; const char *p, *end; GeometryInfo args; KernelInfo *kernel; MagickStatusType flags; ssize_t type; /* Parse special 'named' kernel */ GetNextToken(kernel_string,&p,MagickPathExtent,token); type=ParseCommandOption(MagickKernelOptions,MagickFalse,token); if ( type < 0 || type == UserDefinedKernel ) return((KernelInfo *) NULL); /* not a valid named kernel */ while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';')) p++; end = strchr(p, ';'); /* end of this kernel defintion */ if ( end == (char *) NULL ) end = strchr(p, '\0'); /* ParseGeometry() needs the geometry separated! 
-- Arrgghh */ memcpy(token, p, (size_t) (end-p)); token[end-p] = '\0'; SetGeometryInfo(&args); flags = ParseGeometry(token, &args); #if 0 /* For Debugging Geometry Input */ (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n", flags, args.rho, args.sigma, args.xi, args.psi ); #endif /* special handling of missing values in input string */ switch( type ) { /* Shape Kernel Defaults */ case UnityKernel: if ( (flags & WidthValue) == 0 ) args.rho = 1.0; /* Default scale = 1.0, zero is valid */ break; case SquareKernel: case DiamondKernel: case OctagonKernel: case DiskKernel: case PlusKernel: case CrossKernel: if ( (flags & HeightValue) == 0 ) args.sigma = 1.0; /* Default scale = 1.0, zero is valid */ break; case RingKernel: if ( (flags & XValue) == 0 ) args.xi = 1.0; /* Default scale = 1.0, zero is valid */ break; case RectangleKernel: /* Rectangle - set size defaults */ if ( (flags & WidthValue) == 0 ) /* if no width then */ args.rho = args.sigma; /* then width = height */ if ( args.rho < 1.0 ) /* if width too small */ args.rho = 3; /* then width = 3 */ if ( args.sigma < 1.0 ) /* if height too small */ args.sigma = args.rho; /* then height = width */ if ( (flags & XValue) == 0 ) /* center offset if not defined */ args.xi = (double)(((ssize_t)args.rho-1)/2); if ( (flags & YValue) == 0 ) args.psi = (double)(((ssize_t)args.sigma-1)/2); break; /* Distance Kernel Defaults */ case ChebyshevKernel: case ManhattanKernel: case OctagonalKernel: case EuclideanKernel: if ( (flags & HeightValue) == 0 ) /* no distance scale */ args.sigma = 100.0; /* default distance scaling */ else if ( (flags & AspectValue ) != 0 ) /* '!' 
flag */ args.sigma = QuantumRange/(args.sigma+1); /* maximum pixel distance */ else if ( (flags & PercentValue ) != 0 ) /* '%' flag */ args.sigma *= QuantumRange/100.0; /* percentage of color range */ break; default: break; } kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args, exception); if ( kernel == (KernelInfo *) NULL ) return(kernel); /* global expand to rotated kernel list - only for single kernels */ if ( kernel->next == (KernelInfo *) NULL ) { if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel args */ ExpandRotateKernelInfo(kernel, 45.0); else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */ ExpandRotateKernelInfo(kernel, 90.0); else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */ ExpandMirrorKernelInfo(kernel); } return(kernel); } MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string, ExceptionInfo *exception) { KernelInfo *kernel, *new_kernel; char *kernel_cache, token[MagickPathExtent]; const char *p; if (kernel_string == (const char *) NULL) return(ParseKernelArray(kernel_string)); p=kernel_string; kernel_cache=(char *) NULL; if (*kernel_string == '@') { kernel_cache=FileToString(kernel_string+1,~0UL,exception); if (kernel_cache == (char *) NULL) return((KernelInfo *) NULL); p=(const char *) kernel_cache; } kernel=NULL; while (GetNextToken(p,(const char **) NULL,MagickPathExtent,token), *token != '\0') { /* ignore extra or multiple ';' kernel separators */ if (*token != ';') { /* tokens starting with alpha is a Named kernel */ if (isalpha((int) ((unsigned char) *token)) != 0) new_kernel=ParseKernelName(p,exception); else /* otherwise a user defined kernel array */ new_kernel=ParseKernelArray(p); /* Error handling -- this is not proper error handling! 
*/ if (new_kernel == (KernelInfo *) NULL) { if (kernel != (KernelInfo *) NULL) kernel=DestroyKernelInfo(kernel); return((KernelInfo *) NULL); } /* initialise or append the kernel list */ if (kernel == (KernelInfo *) NULL) kernel=new_kernel; else LastKernelInfo(kernel)->next=new_kernel; } /* look for the next kernel in list */ p=strchr(p,';'); if (p == (char *) NULL) break; p++; } if (kernel_cache != (char *) NULL) kernel_cache=DestroyString(kernel_cache); return(kernel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e K e r n e l B u i l t I n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireKernelBuiltIn() returned one of the 'named' built-in types of % kernels used for special purposes such as gaussian blurring, skeleton % pruning, and edge distance determination. % % They take a KernelType, and a set of geometry style arguments, which were % typically decoded from a user supplied string, or from a more complex % Morphology Method that was requested. % % The format of the AcquireKernalBuiltIn method is: % % KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type, % const GeometryInfo args) % % A description of each parameter follows: % % o type: the pre-defined type of kernel wanted % % o args: arguments defining or modifying the kernel % % Convolution Kernels % % Unity % The a No-Op or Scaling single element kernel. % % Gaussian:{radius},{sigma} % Generate a two-dimensional gaussian kernel, as used by -gaussian. % The sigma for the curve is required. The resulting kernel is % normalized, % % If 'sigma' is zero, you get a single pixel on a field of zeros. % % NOTE: that the 'radius' is optional, but if provided can limit (clip) % the final size of the resulting kernel to a square 2*radius+1 in size. % The radius should be at least 2 times that of the sigma value, or % sever clipping and aliasing may result. 
If not given or set to 0 the % radius will be determined so as to produce the best minimal error % result, which is usally much larger than is normally needed. % % LoG:{radius},{sigma} % "Laplacian of a Gaussian" or "Mexician Hat" Kernel. % The supposed ideal edge detection, zero-summing kernel. % % An alturnative to this kernel is to use a "DoG" with a sigma ratio of % approx 1.6 (according to wikipedia). % % DoG:{radius},{sigma1},{sigma2} % "Difference of Gaussians" Kernel. % As "Gaussian" but with a gaussian produced by 'sigma2' subtracted % from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1. % The result is a zero-summing kernel. % % Blur:{radius},{sigma}[,{angle}] % Generates a 1 dimensional or linear gaussian blur, at the angle given % (current restricted to orthogonal angles). If a 'radius' is given the % kernel is clipped to a width of 2*radius+1. Kernel can be rotated % by a 90 degree angle. % % If 'sigma' is zero, you get a single pixel on a field of zeros. % % Note that two convolutions with two "Blur" kernels perpendicular to % each other, is equivalent to a far larger "Gaussian" kernel with the % same sigma value, However it is much faster to apply. This is how the % "-blur" operator actually works. % % Comet:{width},{sigma},{angle} % Blur in one direction only, much like how a bright object leaves % a comet like trail. The Kernel is actually half a gaussian curve, % Adding two such blurs in opposite directions produces a Blur Kernel. % Angle can be rotated in multiples of 90 degrees. % % Note that the first argument is the width of the kernel and not the % radius of the kernel. % % Binomial:[{radius}] % Generate a discrete kernel using a 2 dimentional Pascel's Triangle % of values. Used for special forma of image filters. % % # Still to be implemented... % # % # Filter2D % # Filter1D % # Set kernel values using a resize filter, and given scale (sigma) % # Cylindrical or Linear. Is this possible with an image? 
%       #
%
%  Named Constant Convolution Kernels
%
%  All these are unscaled, zero-summing kernels by default. As such for
%  non-HDRI version of ImageMagick some form of normalization, user scaling,
%  and biasing the results is recommended, to prevent the resulting image
%  being 'clipped'.
%
%  The 3x3 kernels (most of these) can be circularly rotated in multiples of
%  45 degrees to generate the 8 angled variants of each of the kernels.
%
%    Laplacian:{type}
%      Discrete Laplacian Kernels, (without normalization)
%      Type 0 :  3x3 with center:8 surrounded by -1  (8 neighbourhood)
%      Type 1 :  3x3 with center:4 edge:-1 corner:0 (4 neighbourhood)
%      Type 2 :  3x3 with center:4 edge:1 corner:-2
%      Type 3 :  3x3 with center:4 edge:-2 corner:1
%      Type 5 :  5x5 laplacian
%      Type 7 :  7x7 laplacian
%      Type 15 : 5x5 LoG (sigma approx 1.4)
%      Type 19 : 9x9 LoG (sigma approx 1.4)
%
%    Sobel:{angle}
%      Sobel 'Edge' convolution kernel (3x3)
%          | -1, 0, 1 |
%          | -2, 0, 2 |
%          | -1, 0, 1 |
%
%    Roberts:{angle}
%      Roberts convolution kernel (3x3)
%          |  0, 0, 0 |
%          | -1, 1, 0 |
%          |  0, 0, 0 |
%
%    Prewitt:{angle}
%      Prewitt Edge convolution kernel (3x3)
%          | -1, 0, 1 |
%          | -1, 0, 1 |
%          | -1, 0, 1 |
%
%    Compass:{angle}
%      Prewitt's "Compass" convolution kernel (3x3)
%          | -1, 1, 1 |
%          | -1,-2, 1 |
%          | -1, 1, 1 |
%
%    Kirsch:{angle}
%      Kirsch's "Compass" convolution kernel (3x3)
%          | -3,-3, 5 |
%          | -3, 0, 5 |
%          | -3,-3, 5 |
%
%    FreiChen:{angle}
%      Frei-Chen Edge Detector is based on a kernel that is similar to
%      the Sobel Kernel, but is designed to be isotropic. That is it takes
%      into account the distance of the diagonal in the kernel.
%
%          |   1,     0,   -1     |
%          | sqrt(2), 0, -sqrt(2) |
%          |   1,     0,   -1     |
%
%    FreiChen:{type},{angle}
%
%      Frei-Chen Pre-weighted kernels...
%
%        Type 0:  default un-normalized version shown above.
%
%        Type 1: Orthogonal Kernel (same as type 11 below)
%          |   1,     0,   -1     |
%          | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
%          |   1,     0,   -1     |
%
%        Type 2: Diagonal form of Kernel...
% | 1, sqrt(2), 0 | % | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2) % | 0, -sqrt(2) -1 | % % However this kernel is als at the heart of the FreiChen Edge Detection % Process which uses a set of 9 specially weighted kernel. These 9 % kernels not be normalized, but directly applied to the image. The % results is then added together, to produce the intensity of an edge in % a specific direction. The square root of the pixel value can then be % taken as the cosine of the edge, and at least 2 such runs at 90 degrees % from each other, both the direction and the strength of the edge can be % determined. % % Type 10: All 9 of the following pre-weighted kernels... % % Type 11: | 1, 0, -1 | % | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2) % | 1, 0, -1 | % % Type 12: | 1, sqrt(2), 1 | % | 0, 0, 0 | / 2*sqrt(2) % | 1, sqrt(2), 1 | % % Type 13: | sqrt(2), -1, 0 | % | -1, 0, 1 | / 2*sqrt(2) % | 0, 1, -sqrt(2) | % % Type 14: | 0, 1, -sqrt(2) | % | -1, 0, 1 | / 2*sqrt(2) % | sqrt(2), -1, 0 | % % Type 15: | 0, -1, 0 | % | 1, 0, 1 | / 2 % | 0, -1, 0 | % % Type 16: | 1, 0, -1 | % | 0, 0, 0 | / 2 % | -1, 0, 1 | % % Type 17: | 1, -2, 1 | % | -2, 4, -2 | / 6 % | -1, -2, 1 | % % Type 18: | -2, 1, -2 | % | 1, 4, 1 | / 6 % | -2, 1, -2 | % % Type 19: | 1, 1, 1 | % | 1, 1, 1 | / 3 % | 1, 1, 1 | % % The first 4 are for edge detection, the next 4 are for line detection % and the last is to add a average component to the results. % % Using a special type of '-1' will return all 9 pre-weighted kernels % as a multi-kernel list, so that you can use them directly (without % normalization) with the special "-set option:morphology:compose Plus" % setting to apply the full FreiChen Edge Detection Technique. % % If 'type' is large it will be taken to be an actual rotation angle for % the default FreiChen (type 0) kernel. As such FreiChen:45 will look % like a Sobel:45 but with 'sqrt(2)' instead of '2' values. 
% % WARNING: The above was layed out as per % http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf % But rotated 90 degrees so direction is from left rather than the top. % I have yet to find any secondary confirmation of the above. The only % other source found was actual source code at % http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf % Neigher paper defineds the kernels in a way that looks locical or % correct when taken as a whole. % % Boolean Kernels % % Diamond:[{radius}[,{scale}]] % Generate a diamond shaped kernel with given radius to the points. % Kernel size will again be radius*2+1 square and defaults to radius 1, % generating a 3x3 kernel that is slightly larger than a square. % % Square:[{radius}[,{scale}]] % Generate a square shaped kernel of size radius*2+1, and defaulting % to a 3x3 (radius 1). % % Octagon:[{radius}[,{scale}]] % Generate octagonal shaped kernel of given radius and constant scale. % Default radius is 3 producing a 7x7 kernel. A radius of 1 will result % in "Diamond" kernel. % % Disk:[{radius}[,{scale}]] % Generate a binary disk, thresholded at the radius given, the radius % may be a float-point value. Final Kernel size is floor(radius)*2+1 % square. A radius of 5.3 is the default. % % NOTE: That a low radii Disk kernels produce the same results as % many of the previously defined kernels, but differ greatly at larger % radii. Here is a table of equivalences... % "Disk:1" => "Diamond", "Octagon:1", or "Cross:1" % "Disk:1.5" => "Square" % "Disk:2" => "Diamond:2" % "Disk:2.5" => "Octagon" % "Disk:2.9" => "Square:2" % "Disk:3.5" => "Octagon:3" % "Disk:4.5" => "Octagon:4" % "Disk:5.4" => "Octagon:5" % "Disk:6.4" => "Octagon:6" % All other Disk shapes are unique to this kernel, but because a "Disk" % is more circular when using a larger radius, using a larger radius is % preferred over iterating the morphological operation. % % Rectangle:{geometry} % Simply generate a rectangle of 1's with the size given. 
You can also % specify the location of the 'control point', otherwise the closest % pixel to the center of the rectangle is selected. % % Properly centered and odd sized rectangles work the best. % % Symbol Dilation Kernels % % These kernel is not a good general morphological kernel, but is used % more for highlighting and marking any single pixels in an image using, % a "Dilate" method as appropriate. % % For the same reasons iterating these kernels does not produce the % same result as using a larger radius for the symbol. % % Plus:[{radius}[,{scale}]] % Cross:[{radius}[,{scale}]] % Generate a kernel in the shape of a 'plus' or a 'cross' with % a each arm the length of the given radius (default 2). % % NOTE: "plus:1" is equivalent to a "Diamond" kernel. % % Ring:{radius1},{radius2}[,{scale}] % A ring of the values given that falls between the two radii. % Defaults to a ring of approximataly 3 radius in a 7x7 kernel. % This is the 'edge' pixels of the default "Disk" kernel, % More specifically, "Ring" -> "Ring:2.5,3.5,1.0" % % Hit and Miss Kernels % % Peak:radius1,radius2 % Find any peak larger than the pixels the fall between the two radii. % The default ring of pixels is as per "Ring". 
% Edges % Find flat orthogonal edges of a binary shape % Corners % Find 90 degree corners of a binary shape % Diagonals:type % A special kernel to thin the 'outside' of diagonals % LineEnds:type % Find end points of lines (for pruning a skeletion) % Two types of lines ends (default to both) can be searched for % Type 0: All line ends % Type 1: single kernel for 4-conneected line ends % Type 2: single kernel for simple line ends % LineJunctions % Find three line junctions (within a skeletion) % Type 0: all line junctions % Type 1: Y Junction kernel % Type 2: Diagonal T Junction kernel % Type 3: Orthogonal T Junction kernel % Type 4: Diagonal X Junction kernel % Type 5: Orthogonal + Junction kernel % Ridges:type % Find single pixel ridges or thin lines % Type 1: Fine single pixel thick lines and ridges % Type 2: Find two pixel thick lines and ridges % ConvexHull % Octagonal Thickening Kernel, to generate convex hulls of 45 degrees % Skeleton:type % Traditional skeleton generating kernels. % Type 1: Tradional Skeleton kernel (4 connected skeleton) % Type 2: HIPR2 Skeleton kernel (8 connected skeleton) % Type 3: Thinning skeleton based on a ressearch paper by % Dan S. Bloomberg (Default Type) % ThinSE:type % A huge variety of Thinning Kernels designed to preserve conectivity. % many other kernel sets use these kernels as source definitions. % Type numbers are 41-49, 81-89, 481, and 482 which are based on % the super and sub notations used in the source research paper. % % Distance Measuring Kernels % % Different types of distance measuring methods, which are used with the % a 'Distance' morphology method for generating a gradient based on % distance from an edge of a binary shape, though there is a technique % for handling a anti-aliased shape. % % See the 'Distance' Morphological Method, for information of how it is % applied. 
% % Chebyshev:[{radius}][x{scale}[%!]] % Chebyshev Distance (also known as Tchebychev or Chessboard distance) % is a value of one to any neighbour, orthogonal or diagonal. One why % of thinking of it is the number of squares a 'King' or 'Queen' in % chess needs to traverse reach any other position on a chess board. % It results in a 'square' like distance function, but one where % diagonals are given a value that is closer than expected. % % Manhattan:[{radius}][x{scale}[%!]] % Manhattan Distance (also known as Rectilinear, City Block, or the Taxi % Cab distance metric), it is the distance needed when you can only % travel in horizontal or vertical directions only. It is the % distance a 'Rook' in chess would have to travel, and results in a % diamond like distances, where diagonals are further than expected. % % Octagonal:[{radius}][x{scale}[%!]] % An interleving of Manhatten and Chebyshev metrics producing an % increasing octagonally shaped distance. Distances matches those of % the "Octagon" shaped kernel of the same radius. The minimum radius % and default is 2, producing a 5x5 kernel. % % Euclidean:[{radius}][x{scale}[%!]] % Euclidean distance is the 'direct' or 'as the crow flys' distance. % However by default the kernel size only has a radius of 1, which % limits the distance to 'Knight' like moves, with only orthogonal and % diagonal measurements being correct. As such for the default kernel % you will get octagonal like distance function. % % However using a larger radius such as "Euclidean:4" you will get a % much smoother distance gradient from the edge of the shape. Especially % if the image is pre-processed to include any anti-aliasing pixels. % Of course a larger kernel is slower to use, and not always needed. % % The first three Distance Measuring Kernels will only generate distances % of exact multiples of {scale} in binary images. As such you can use a % scale of 1 without loosing any information. 
However you also need some % scaling when handling non-binary anti-aliased shapes. % % The "Euclidean" Distance Kernel however does generate a non-integer % fractional results, and as such scaling is vital even for binary shapes. % */ MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type, const GeometryInfo *args,ExceptionInfo *exception) { KernelInfo *kernel; register ssize_t i; register ssize_t u, v; double nan = sqrt((double)-1.0); /* Special Value : Not A Number */ /* Generate a new empty kernel if needed */ kernel=(KernelInfo *) NULL; switch(type) { case UndefinedKernel: /* These should not call this function */ case UserDefinedKernel: assert("Should not call this function" != (char *) NULL); break; case LaplacianKernel: /* Named Descrete Convolution Kernels */ case SobelKernel: /* these are defined using other kernels */ case RobertsKernel: case PrewittKernel: case CompassKernel: case KirschKernel: case FreiChenKernel: case EdgesKernel: /* Hit and Miss kernels */ case CornersKernel: case DiagonalsKernel: case LineEndsKernel: case LineJunctionsKernel: case RidgesKernel: case ConvexHullKernel: case SkeletonKernel: case ThinSEKernel: break; /* A pre-generated kernel is not needed */ #if 0 /* set to 1 to do a compile-time check that we haven't missed anything */ case UnityKernel: case GaussianKernel: case DoGKernel: case LoGKernel: case BlurKernel: case CometKernel: case BinomialKernel: case DiamondKernel: case SquareKernel: case RectangleKernel: case OctagonKernel: case DiskKernel: case PlusKernel: case CrossKernel: case RingKernel: case PeaksKernel: case ChebyshevKernel: case ManhattanKernel: case OctangonalKernel: case EuclideanKernel: #else default: #endif /* Generate the base Kernel Structure */ kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel)); if (kernel == (KernelInfo *) NULL) return(kernel); (void) ResetMagickMemory(kernel,0,sizeof(*kernel)); kernel->minimum = kernel->maximum = kernel->angle = 0.0; kernel->negative_range = 
kernel->positive_range = 0.0; kernel->type = type; kernel->next = (KernelInfo *) NULL; kernel->signature=MagickCoreSignature; break; } switch(type) { /* Convolution Kernels */ case UnityKernel: { kernel->height = kernel->width = (size_t) 1; kernel->x = kernel->y = (ssize_t) 0; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(1,sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); kernel->maximum = kernel->values[0] = args->rho; break; } break; case GaussianKernel: case DoGKernel: case LoGKernel: { double sigma = fabs(args->sigma), sigma2 = fabs(args->xi), A, B, R; if ( args->rho >= 1.0 ) kernel->width = (size_t)args->rho*2+1; else if ( (type != DoGKernel) || (sigma >= sigma2) ) kernel->width = GetOptimalKernelWidth2D(args->rho,sigma); else kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2); kernel->height = kernel->width; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* WARNING: The following generates a 'sampled gaussian' kernel. * What we really want is a 'discrete gaussian' kernel. 
* * How to do this is I don't know, but appears to be basied on the * Error Function 'erf()' (intergral of a gaussian) */ if ( type == GaussianKernel || type == DoGKernel ) { /* Calculate a Gaussian, OR positive half of a DoG */ if ( sigma > MagickEpsilon ) { A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ B = (double) (1.0/(Magick2PI*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B; } else /* limiting case - a unity (normalized Dirac) kernel */ { (void) ResetMagickMemory(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } } if ( type == DoGKernel ) { /* Subtract a Negative Gaussian for "Difference of Gaussian" */ if ( sigma2 > MagickEpsilon ) { sigma = sigma2; /* simplify loop expressions */ A = 1.0/(2.0*sigma*sigma); B = (double) (1.0/(Magick2PI*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B; } else /* limiting case - a unity (normalized Dirac) kernel */ kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0; } if ( type == LoGKernel ) { /* Calculate a Laplacian of a Gaussian - Or Mexician Hat */ if ( sigma > MagickEpsilon ) { A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { R = ((double)(u*u+v*v))*A; kernel->values[i] = (1-R)*exp(-R)*B; } } else /* special case - generate a unity kernel */ { (void) ResetMagickMemory(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } } /* Note the above kernels may have been 'clipped' by a user defined ** radius, 
producing a smaller (darker) kernel. Also for very small ** sigma's (> 0.1) the central value becomes larger than one, and thus ** producing a very bright kernel. ** ** Normalization will still be needed. */ /* Normalize the 2D Gaussian Kernel ** ** NB: a CorrelateNormalize performs a normal Normalize if ** there are no negative values. */ CalcKernelMetaData(kernel); /* the other kernel meta-data */ ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue); break; } case BlurKernel: { double sigma = fabs(args->sigma), alpha, beta; if ( args->rho >= 1.0 ) kernel->width = (size_t)args->rho*2+1; else kernel->width = GetOptimalKernelWidth1D(args->rho,sigma); kernel->height = 1; kernel->x = (ssize_t) (kernel->width-1)/2; kernel->y = 0; kernel->negative_range = kernel->positive_range = 0.0; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); #if 1 #define KernelRank 3 /* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix). ** It generates a gaussian 3 times the width, and compresses it into ** the expected range. This produces a closer normalization of the ** resulting kernel, especially for very low sigma values. ** As such while wierd it is prefered. ** ** I am told this method originally came from Photoshop. ** ** A properly normalized curve is generated (apart from edge clipping) ** even though we later normalize the result (for edge clipping) ** to allow the correct generation of a "Difference of Blurs". 
*/ /* initialize */ v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */ (void) ResetMagickMemory(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); /* Calculate a Positive 1D Gaussian */ if ( sigma > MagickEpsilon ) { sigma *= KernelRank; /* simplify loop expressions */ alpha = 1.0/(2.0*sigma*sigma); beta= (double) (1.0/(MagickSQ2PI*sigma )); for ( u=-v; u <= v; u++) { kernel->values[(u+v)/KernelRank] += exp(-((double)(u*u))*alpha)*beta; } } else /* special case - generate a unity kernel */ kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; #else /* Direct calculation without curve averaging This is equivelent to a KernelRank of 1 */ /* Calculate a Positive Gaussian */ if ( sigma > MagickEpsilon ) { alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ beta = 1.0/(MagickSQ2PI*sigma); for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = exp(-((double)(u*u))*alpha)*beta; } else /* special case - generate a unity kernel */ { (void) ResetMagickMemory(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } #endif /* Note the above kernel may have been 'clipped' by a user defined ** radius, producing a smaller (darker) kernel. Also for very small ** sigma's (> 0.1) the central value becomes larger than one, as a ** result of not generating a actual 'discrete' kernel, and thus ** producing a very bright 'impulse'. ** ** Becuase of these two factors Normalization is required! */ /* Normalize the 1D Gaussian Kernel ** ** NB: a CorrelateNormalize performs a normal Normalize if ** there are no negative values. 
*/ CalcKernelMetaData(kernel); /* the other kernel meta-data */ ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue); /* rotate the 1D kernel by given angle */ RotateKernelInfo(kernel, args->xi ); break; } case CometKernel: { double sigma = fabs(args->sigma), A; if ( args->rho < 1.0 ) kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1; else kernel->width = (size_t)args->rho; kernel->x = kernel->y = 0; kernel->height = 1; kernel->negative_range = kernel->positive_range = 0.0; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* A comet blur is half a 1D gaussian curve, so that the object is ** blurred in one direction only. This may not be quite the right ** curve to use so may change in the future. The function must be ** normalised after generation, which also resolves any clipping. ** ** As we are normalizing and not subtracting gaussians, ** there is no need for a divisor in the gaussian formula ** ** It is less comples */ if ( sigma > MagickEpsilon ) { #if 1 #define KernelRank 3 v = (ssize_t) kernel->width*KernelRank; /* start/end points */ (void) ResetMagickMemory(kernel->values,0, (size_t) kernel->width*sizeof(*kernel->values)); sigma *= KernelRank; /* simplify the loop expression */ A = 1.0/(2.0*sigma*sigma); /* B = 1.0/(MagickSQ2PI*sigma); */ for ( u=0; u < v; u++) { kernel->values[u/KernelRank] += exp(-((double)(u*u))*A); /* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */ } for (i=0; i < (ssize_t) kernel->width; i++) kernel->positive_range += kernel->values[i]; #else A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */ /* B = 1.0/(MagickSQ2PI*sigma); */ for ( i=0; i < (ssize_t) kernel->width; i++) kernel->positive_range += kernel->values[i] = exp(-((double)(i*i))*A); /* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */ #endif } else /* special 
case - generate a unity kernel */ { (void) ResetMagickMemory(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; kernel->positive_range = 1.0; } kernel->minimum = 0.0; kernel->maximum = kernel->values[0]; kernel->negative_range = 0.0; ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */ RotateKernelInfo(kernel, args->xi); /* Rotate by angle */ break; } case BinomialKernel: { size_t order_f; if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; order_f = fact(kernel->width-1); kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values within diamond area to scale given */ for ( i=0, v=0; v < (ssize_t)kernel->height; v++) { size_t alpha = order_f / ( fact((size_t) v) * fact(kernel->height-v-1) ); for ( u=0; u < (ssize_t)kernel->width; u++, i++) kernel->positive_range += kernel->values[i] = (double) (alpha * order_f / ( fact((size_t) u) * fact(kernel->height-u-1) )); } kernel->minimum = 1.0; kernel->maximum = kernel->values[kernel->x+kernel->y*kernel->width]; kernel->negative_range = 0.0; break; } /* Convolution Kernels - Well Known Named Constant Kernels */ case LaplacianKernel: { switch ( (int) args->rho ) { case 0: default: /* laplacian square filter -- default */ kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1"); break; case 1: /* laplacian diamond filter */ kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0"); break; case 2: kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2"); break; case 3: kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1"); break; case 5: /* a 5x5 laplacian */ kernel=ParseKernelArray( "5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 
-1,2,3,2,-1 -4,-1,0,-1,-4"); break; case 7: /* a 7x7 laplacian */ kernel=ParseKernelArray( "7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" ); break; case 15: /* a 5x5 LoG (sigma approx 1.4) */ kernel=ParseKernelArray( "5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0"); break; case 19: /* a 9x9 LoG (sigma approx 1.4) */ /* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */ kernel=ParseKernelArray( "9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; break; } case SobelKernel: { /* Simple Sobel Kernel */ kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case RobertsKernel: { kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case PrewittKernel: { kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case CompassKernel: { kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case KirschKernel: { kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case FreiChenKernel: /* Direction is set to be left to right positive */ /* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? 
*/ /* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */ { switch ( (int) args->rho ) { default: case 0: kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[3] = +(MagickRealType) MagickSQ2; kernel->values[5] = -(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ break; case 2: kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[1] = kernel->values[3]= +(MagickRealType) MagickSQ2; kernel->values[5] = kernel->values[7]= -(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 10: { kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19",exception); if (kernel == (KernelInfo *) NULL) return(kernel); break; } case 1: case 11: kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[3] = +(MagickRealType) MagickSQ2; kernel->values[5] = -(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 12: kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[1] = +(MagickRealType) MagickSQ2; kernel->values[7] = +(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 13: kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[0] = +(MagickRealType) MagickSQ2; kernel->values[8] = -(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, 
(double) (1.0/2.0*MagickSQ2), NoValue); break; case 14: kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[2] = -(MagickRealType) MagickSQ2; kernel->values[6] = +(MagickRealType) MagickSQ2; CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 15: kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/2.0, NoValue); break; case 16: kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/2.0, NoValue); break; case 17: kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/6.0, NoValue); break; case 18: kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/6.0, NoValue); break; case 19: kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/3.0, NoValue); break; } if ( fabs(args->sigma) >= MagickEpsilon ) /* Rotate by correctly supplied 'angle' */ RotateKernelInfo(kernel, args->sigma); else if ( args->rho > 30.0 || args->rho < -30.0 ) /* Rotate by out of bounds 'type' */ RotateKernelInfo(kernel, args->rho); break; } /* Boolean or Shaped Kernels */ case DiamondKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) 
return(DestroyKernelInfo(kernel)); /* set all kernel values within diamond area to scale given */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x) kernel->positive_range += kernel->values[i] = args->sigma; else kernel->values[i] = nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case SquareKernel: case RectangleKernel: { double scale; if ( type == SquareKernel ) { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = (size_t) (2*args->rho+1); kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; scale = args->sigma; } else { /* NOTE: user defaults set in "AcquireKernelInfo()" */ if ( args->rho < 1.0 || args->sigma < 1.0 ) return(DestroyKernelInfo(kernel)); /* invalid args given */ kernel->width = (size_t)args->rho; kernel->height = (size_t)args->sigma; if ( args->xi < 0.0 || args->xi > (double)kernel->width || args->psi < 0.0 || args->psi > (double)kernel->height ) return(DestroyKernelInfo(kernel)); /* invalid args given */ kernel->x = (ssize_t) args->xi; kernel->y = (ssize_t) args->psi; scale = 1.0; } kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values to scale given */ u=(ssize_t) (kernel->width*kernel->height); for ( i=0; i < u; i++) kernel->values[i] = scale; kernel->minimum = kernel->maximum = scale; /* a flat shape */ kernel->positive_range = scale*u; break; } case OctagonKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius = 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( 
AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ( (labs((long) u)+labs((long) v)) <= ((long)kernel->x + (long)(kernel->x/2)) ) kernel->positive_range += kernel->values[i] = args->sigma; else kernel->values[i] = nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case DiskKernel: { ssize_t limit = (ssize_t)(args->rho*args->rho); if (args->rho < 0.4) /* default radius approx 4.3 */ kernel->width = kernel->height = 9L, limit = 18L; else kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ((u*u+v*v) <= limit) kernel->positive_range += kernel->values[i] = args->sigma; else kernel->values[i] = nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case PlusKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values along axises to given scale */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = (u == 0 || v == 0) ? 
args->sigma : nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0); break; } case CrossKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values along axises to given scale */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = (u == v || u == -v) ? args->sigma : nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0); break; } /* HitAndMiss Kernels */ case RingKernel: case PeaksKernel: { ssize_t limit1, limit2, scale; if (args->rho < args->sigma) { kernel->width = ((size_t)args->sigma)*2+1; limit1 = (ssize_t)(args->rho*args->rho); limit2 = (ssize_t)(args->sigma*args->sigma); } else { kernel->width = ((size_t)args->rho)*2+1; limit1 = (ssize_t)(args->sigma*args->sigma); limit2 = (ssize_t)(args->rho*args->rho); } if ( limit2 <= 0 ) kernel->width = 7L, limit1 = 7L, limit2 = 11L; kernel->height = kernel->width; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); /* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */ scale = (ssize_t) (( type == PeaksKernel) ? 
0.0 : args->xi); for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { ssize_t radius=u*u+v*v; if (limit1 < radius && radius <= limit2) kernel->positive_range += kernel->values[i] = (double) scale; else kernel->values[i] = nan; } kernel->minimum = kernel->maximum = (double) scale; if ( type == PeaksKernel ) { /* set the central point in the middle */ kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; kernel->positive_range = 1.0; kernel->maximum = 1.0; } break; } case EdgesKernel: { kernel=AcquireKernelInfo("ThinSE:482",exception); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */ break; } case CornersKernel: { kernel=AcquireKernelInfo("ThinSE:87",exception); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */ break; } case DiagonalsKernel: { switch ( (int) args->rho ) { case 0: default: { KernelInfo *new_kernel; kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; ExpandMirrorKernelInfo(kernel); return(kernel); } case 1: kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-"); break; case 2: kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case LineEndsKernel: { /* Kernels for finding the end of thin lines */ switch ( (int) args->rho ) { case 0: default: /* set of kernels to find all end of lines */ return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>",exception)); case 1: /* kernel for 4-connected line ends - no rotation */ 
kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-"); break; case 2: /* kernel to add for 8-connected lines - no rotation */ kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1"); break; case 3: /* kernel to add for orthogonal line ends - does not find corners */ kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0"); break; case 4: /* traditional line end - fails on last T end */ kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case LineJunctionsKernel: { /* kernels for finding the junctions of multiple lines */ switch ( (int) args->rho ) { case 0: default: /* set of kernels to find all line junctions */ return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>",exception)); case 1: /* Y Junction */ kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-"); break; case 2: /* Diagonal T Junctions */ kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1"); break; case 3: /* Orthogonal T Junctions */ kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-"); break; case 4: /* Diagonal X Junctions */ kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1"); break; case 5: /* Orthogonal X Junctions - minimal diamond kernel */ kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case RidgesKernel: { /* Ridges - Ridge finding kernels */ KernelInfo *new_kernel; switch ( (int) args->rho ) { case 1: default: kernel=ParseKernelArray("3x1:0,1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */ break; case 2: kernel=ParseKernelArray("4x1:0,1,1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */ /* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */ /* Unfortunatally we can not yet 
rotate a non-square kernel */ /* But then we can't flip a non-symetrical kernel either */ new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; break; } break; } case ConvexHullKernel: { KernelInfo *new_kernel; /* first set of 8 kernels */ kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 
append the mirror versions too - no flip function yet */ new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; ExpandRotateKernelInfo(new_kernel, 90.0); LastKernelInfo(kernel)->next = new_kernel; break; } case SkeletonKernel: { switch ( (int) args->rho ) { case 1: default: /* Traditional Skeleton... ** A cyclically rotated single kernel */ kernel=AcquireKernelInfo("ThinSE:482",exception); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */ break; case 2: /* HIPR Variation of the cyclic skeleton ** Corners of the traditional method made more forgiving, ** but the retain the same cyclic order. */ kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;",exception); if (kernel == (KernelInfo *) NULL) return(kernel); if (kernel->next == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); kernel->type = type; kernel->next->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */ break; case 3: /* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's ** "Connectivity-Preserving Morphological Image Thransformations" ** by Dan S. Bloomberg, available on Leptonica, Selected Papers, ** http://www.leptonica.com/papers/conn.pdf */ kernel=AcquireKernelInfo("ThinSE:41; ThinSE:42; ThinSE:43", exception); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->next->type = type; kernel->next->next->type = type; ExpandMirrorKernelInfo(kernel); /* 12 kernels total */ break; } break; } case ThinSEKernel: { /* Special kernels for general thinning, while preserving connections ** "Connectivity-Preserving Morphological Image Thransformations" ** by Dan S. 
Bloomberg, available on Leptonica, Selected Papers, ** http://www.leptonica.com/papers/conn.pdf ** And ** http://tpgit.github.com/Leptonica/ccthin_8c_source.html ** ** Note kernels do not specify the origin pixel, allowing them ** to be used for both thickening and thinning operations. */ switch ( (int) args->rho ) { /* SE for 4-connected thinning */ case 41: /* SE_4_1 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1"); break; case 42: /* SE_4_2 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-"); break; case 43: /* SE_4_3 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1"); break; case 44: /* SE_4_4 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-"); break; case 45: /* SE_4_5 */ kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-"); break; case 46: /* SE_4_6 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1"); break; case 47: /* SE_4_7 */ kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-"); break; case 48: /* SE_4_8 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1"); break; case 49: /* SE_4_9 */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1"); break; /* SE for 8-connected thinning - negatives of the above */ case 81: /* SE_8_0 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-"); break; case 82: /* SE_8_2 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-"); break; case 83: /* SE_8_3 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-"); break; case 84: /* SE_8_4 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-"); break; case 85: /* SE_8_5 */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-"); break; case 86: /* SE_8_6 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1"); break; case 87: /* SE_8_7 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-"); break; case 88: /* SE_8_8 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-"); break; case 89: /* SE_8_9 */ kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-"); break; /* Special combined SE kernels */ case 423: /* SE_4_2 , SE_4_3 Combined Kernel */ kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-"); break; case 823: /* SE_8_2 , SE_8_3 Combined Kernel 
*/ kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-"); break; case 481: /* SE_48_1 - General Connected Corner Kernel */ kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-"); break; default: case 482: /* SE_48_2 - General Edge Kernel */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } /* Distance Measuring Kernels */ case ChebyshevKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*MagickMax(fabs((double)u),fabs((double)v)) ); kernel->maximum = kernel->values[0]; break; } case ManhattanKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*(labs((long) u)+labs((long) v)) ); kernel->maximum = kernel->values[0]; break; } case OctagonalKernel: { if (args->rho < 2.0) kernel->width = kernel->height = 5; /* default/minimum radius = 2 */ else kernel->width = kernel->height = 
((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { double r1 = MagickMax(fabs((double)u),fabs((double)v)), r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5); kernel->positive_range += kernel->values[i] = args->sigma*MagickMax(r1,r2); } kernel->maximum = kernel->values[0]; break; } case EuclideanKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel->width,kernel->height* sizeof(*kernel->values))); if (kernel->values == (MagickRealType *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*sqrt((double)(u*u+v*v)) ); kernel->maximum = kernel->values[0]; break; } default: { /* No-Op Kernel - Basically just a single pixel on its own */ kernel=ParseKernelArray("1:1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = UndefinedKernel; break; } break; } return(kernel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneKernelInfo() creates a new clone of the given Kernel List so that its % can be modified without effecting the original. The cloned kernel should % be destroyed using DestoryKernelInfo() when no longer needed. 
%
%  The format of the CloneKernelInfo method is:
%
%      KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel to be cloned
%
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
  register ssize_t
    i;

  KernelInfo
    *new_kernel;

  assert(kernel != (KernelInfo *) NULL);
  new_kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (new_kernel == (KernelInfo *) NULL)
    return(new_kernel);
  *new_kernel=(*kernel);  /* copy values in structure */
  /*
    The struct copy above aliased the source kernel's 'values' buffer and
    'next' pointer.  Detach both immediately: otherwise an allocation
    failure below would hand the aliased 'next' chain to DestroyKernelInfo(),
    freeing kernels still owned by the original list.
  */
  new_kernel->values=(MagickRealType *) NULL;
  new_kernel->next=(KernelInfo *) NULL;
  /* replace the values with a copy of the values */
  new_kernel->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel->width,kernel->height*
    sizeof(*kernel->values)));
  if (new_kernel->values == (MagickRealType *) NULL)
    return(DestroyKernelInfo(new_kernel));
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    new_kernel->values[i]=kernel->values[i];
  /* Also clone the next kernel in the kernel list */
  if ( kernel->next != (KernelInfo *) NULL ) {
    new_kernel->next = CloneKernelInfo(kernel->next);
    if ( new_kernel->next == (KernelInfo *) NULL )
      return(DestroyKernelInfo(new_kernel));
  }
  return(new_kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s t r o y K e r n e l I n f o                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyKernelInfo() frees the memory used by a Convolution/Morphology
%  kernel.
%
%  The format of the DestroyKernelInfo method is:
%
%      KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel to be destroyed
%
*/
MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
{
  assert(kernel != (KernelInfo *) NULL);
  /* Recursively destroy the remainder of the kernel list first, so a
  ** single call releases an entire multi-kernel chain. */
  if (kernel->next != (KernelInfo *) NULL)
    kernel->next=DestroyKernelInfo(kernel->next);
  /* Release the aligned value buffer, then the structure itself;
  ** RelinquishMagickMemory() returns NULL, which becomes our result. */
  kernel->values=(MagickRealType *) RelinquishAlignedMemory(kernel->values);
  kernel=(KernelInfo *) RelinquishMagickMemory(kernel);
  return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     E x p a n d M i r r o r K e r n e l I n f o                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExpandMirrorKernelInfo() takes a single kernel, and expands it into a
%  sequence of 90-degree rotated kernels but providing a reflected 180
%  rotation, before the -/+ 90-degree rotations.
%
%  This special rotation order produces a better, more symmetrical thinning of
%  objects.
%
%  The format of the ExpandMirrorKernelInfo method is:
%
%      void ExpandMirrorKernelInfo(KernelInfo *kernel)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
%  This function is only internal to this module, as it is not finalized,
%  especially with regard to non-orthogonal angles, and rotation of larger
%  2D kernels.
*/

#if 0
/* NOTE(review): disabled code.  As written it references 'angle', which is
** not declared in this function, so it would not compile if enabled —
** presumably a leftover from a caller's scope; confirm before re-enabling. */
static void FlopKernelInfo(KernelInfo *kernel)
{ /* Do a Flop by reversing each row. */
  size_t
    y;
  register ssize_t
    x,r;
  register double
    *k,t;

  for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width)
    for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--)
      t=k[x], k[x]=k[r], k[r]=t;

  kernel->x = kernel->width - kernel->x - 1;
  angle = fmod(angle+180.0, 360.0);
}
#endif

/* Expand a single kernel into a list of four: the original, its 180-degree
** reflection, and the +/-90-degree forms — appended in that specific order
** (see header comment above for why the order matters for thinning). */
static void ExpandMirrorKernelInfo(KernelInfo *kernel)
{
  KernelInfo
    *clone, *last;

  last = kernel;

  clone = CloneKernelInfo(last);
  RotateKernelInfo(clone, 180);   /* flip */
  LastKernelInfo(last)->next = clone;
  last = clone;

  clone = CloneKernelInfo(last);
  RotateKernelInfo(clone, 90);    /* transpose */
  LastKernelInfo(last)->next = clone;
  last = clone;

  clone = CloneKernelInfo(last);
  RotateKernelInfo(clone, 180);   /* flop */
  LastKernelInfo(last)->next = clone;

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     E x p a n d R o t a t e K e r n e l I n f o                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating
%  incrementally by the angle given, until the kernel repeats.
%
%  WARNING: 45 degree rotations only works for 3x3 kernels.
%  While 90 degree rotations only works for linear and square kernels
%
%  The format of the ExpandRotateKernelInfo method is:
%
%      void ExpandRotateKernelInfo(KernelInfo *kernel, double angle)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
%    o angle: angle to rotate in degrees
%
%  This function is only internal to this module, as it is not finalized,
%  especially with regard to non-orthogonal angles, and rotation of larger
%  2D kernels.
*/

/* Internal Routine - Return true if two kernels are the same */
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
  const KernelInfo *kernel2)
{
  register size_t
    i;

  /* Geometry and origin must agree before any values are compared. */
  if ((kernel1->width != kernel2->width) ||
      (kernel1->height != kernel2->height) ||
      (kernel1->x != kernel2->x) || (kernel1->y != kernel2->y))
    return MagickFalse;
  for (i=0; i < (kernel1->width*kernel1->height); i++)
  {
    /* A NaN ("don't care") entry must be matched by a NaN entry. */
    if ((IsNaN(kernel1->values[i]) ? 1 : 0) !=
        (IsNaN(kernel2->values[i]) ? 1 : 0))
      return MagickFalse;
    /* Finite entries must agree to within epsilon; when both entries are
       NaN the difference is NaN and this comparison is false, so they are
       treated as equivalent. */
    if (fabs(kernel1->values[i]-kernel2->values[i]) >= MagickEpsilon)
      return MagickFalse;
  }
  return MagickTrue;
}

/*
  Expand the kernel list by appending clones rotated in increments of the
  given angle until the rotation wraps around to the original kernel.
*/
static void ExpandRotateKernelInfo(KernelInfo *kernel, const double angle)
{
  KernelInfo
    *clone,
    *last;

  last = kernel;
  for ( ; ; )
  {
    clone = CloneKernelInfo(last);
    RotateKernelInfo(clone, angle);
    /* Stop once the rotation has come full circle back to the original. */
    if ( SameKernelInfo(kernel, clone) != MagickFalse )
      break;
    LastKernelInfo(last)->next = clone;
    last = clone;
  }
  clone = DestroyKernelInfo(clone); /* kernel has repeated - junk the clone */
  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     C a l c M e t a K e r n e l I n f o                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CalcKernelMetaData() recalculates the KernelInfo meta-data of this kernel
%  only, using the kernel values.  This should only be used if it is not
%  possible to calculate that meta-data in some easier way.
%
%  It is important that the meta-data is correct before ScaleKernelInfo() is
%  used to perform kernel normalization.
%
%  The format of the CalcKernelMetaData method is:
%
%      void CalcKernelMetaData(KernelInfo *kernel)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel to modify
%
%  WARNING: Minimum and Maximum values are assumed to include zero, even if
%  zero is not part of the kernel (as in Gaussian Derived kernels). This
%  however is not true for flat-shaped morphological kernels.
%
%  WARNING: Only the specific kernel pointed to is modified, not a list of
%  multiple kernels.
%
%  This is an internal function and not expected to be useful outside this
%  module.  This could change however.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
  register size_t
    i;

  /* Reset all ranges; minimum/maximum are anchored at zero (see WARNING). */
  kernel->minimum = kernel->maximum = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  for (i=0; i < (kernel->width*kernel->height); i++)
  {
    /* Snap near-zero entries to exactly zero. */
    if ( fabs(kernel->values[i]) < MagickEpsilon )
      kernel->values[i] = 0.0;
    /* Accumulate each entry into the matching signed range. */
    if ( kernel->values[i] < 0 )
      kernel->negative_range += kernel->values[i];
    else
      kernel->positive_range += kernel->values[i];
    Minimize(kernel->minimum, kernel->values[i]);
    Maximize(kernel->maximum, kernel->values[i]);
  }

  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o r p h o l o g y A p p l y                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MorphologyApply() applies a morphological method, multiple times using
%  a list of multiple kernels.  This is the method that should be called by
%  other 'operators' that internally use morphology operations as part of
%  their processing.
%
%  It is basically equivalent to MorphologyImage() (see below) but without
%  any user controls.  This allows internal programs to use this method to
%  perform a specific task without possible interference by any API user
%  supplied settings.
%
%  It is the task of MorphologyImage() to extract any such user controls, and
%  pass them to this function for processing.
%
%  More specifically all given kernels should already be scaled, normalised,
%  and blended appropriately before being passed to this routine.  The
%  appropriate bias, and compose (typically 'UndefinedComposeOp') given.
%
%  The format of the MorphologyApply method is:
%
%      Image *MorphologyApply(const Image *image,MorphologyMethod method,
%        const ssize_t iterations,const KernelInfo *kernel,
%        const CompositeMethod compose,const double bias,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the source image
%
%    o method: the morphology method to be applied.
%
%    o iterations: apply the operation this many times (or no change).
%      A value of -1 means loop until no change found.
%      How this is applied may depend on the morphology method.
%      Typically this is a value of 1.
%
%    o channel: the channel type.
%
%    o kernel: An array of double representing the morphology kernel.
%
%    o compose: How to handle or merge multi-kernel results.
%          If 'UndefinedCompositeOp' use default for the Morphology method.
%          If 'NoCompositeOp' force image to be re-iterated by each kernel.
%          Otherwise merge the results using the compose method given.
%
%    o bias: Convolution Output Bias.
%
%    o exception: return any errors or warnings in this structure.
% */ static ssize_t MorphologyPrimitive(const Image *image,Image *morphology_image, const MorphologyMethod method,const KernelInfo *kernel,const double bias, ExceptionInfo *exception) { #define MorphologyTag "Morphology/Image" CacheView *image_view, *morphology_view; OffsetInfo offset; register ssize_t j, y; size_t *changes, changed, width; MagickBooleanType status; MagickOffsetType progress; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(morphology_image != (Image *) NULL); assert(morphology_image->signature == MagickCoreSignature); assert(kernel != (KernelInfo *) NULL); assert(kernel->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); morphology_view=AcquireAuthenticCacheView(morphology_image,exception); width=image->columns+kernel->width-1; offset.x=0; offset.y=0; switch (method) { case ConvolveMorphology: case DilateMorphology: case DilateIntensityMorphology: case IterativeDistanceMorphology: { /* Kernel needs to used with reflection about origin. */ offset.x=(ssize_t) kernel->width-kernel->x-1; offset.y=(ssize_t) kernel->height-kernel->y-1; break; } case ErodeMorphology: case ErodeIntensityMorphology: case HitAndMissMorphology: case ThinningMorphology: case ThickenMorphology: { offset.x=kernel->x; offset.y=kernel->y; break; } default: { assert("Not a Primitive Morphology Method" != (char *) NULL); break; } } changed=0; changes=(size_t *) AcquireQuantumMemory(GetOpenMPMaximumThreads(), sizeof(*changes)); if (changes == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++) changes[j]=0; if ((method == ConvolveMorphology) && (kernel->width == 1)) { register ssize_t x; /* Special handling (for speed) of vertical (blur) kernels. 
This performs its handling in columns rather than in rows. This is only done for convolve as it is the only method that generates very large 1-D vertical kernels (such as a 'BlurKernel') */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,morphology_image,image->columns,1) #endif for (x=0; x < (ssize_t) image->columns; x++) { const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t r; ssize_t center; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,x,-offset.y,1,image->rows+ kernel->height-1,exception); q=GetCacheViewAuthenticPixels(morphology_view,x,0,1, morphology_image->rows,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } center=(ssize_t) GetPixelChannels(image)*offset.y; for (r=0; r < (ssize_t) image->rows; r++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait morphology_traits, traits; register const MagickRealType *magick_restrict k; register const Quantum *magick_restrict pixels; register ssize_t v; size_t count; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); morphology_traits=GetPixelChannelTraits(morphology_image,channel); if ((traits == UndefinedPixelTrait) || (morphology_traits == UndefinedPixelTrait)) continue; if (((traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p+center) == 0)) { SetPixelChannel(morphology_image,channel,p[center+i],q); continue; } k=(&kernel->values[kernel->height-1]); pixels=p; pixel=bias; gamma=0.0; count=0; if ((morphology_traits & BlendPixelTrait) == 0) for (v=0; v < (ssize_t) kernel->height; v++) { if (!IsNaN(*k)) { pixel+=(*k)*pixels[i]; gamma+=(*k); count++; } k--; pixels+=GetPixelChannels(image); } else for (v=0; v < (ssize_t) kernel->height; v++) { 
if (!IsNaN(*k)) { alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels)); pixel+=alpha*(*k)*pixels[i]; gamma+=alpha*(*k); count++; } k--; pixels+=GetPixelChannels(image); } if (fabs(pixel-p[center+i]) > MagickEpsilon) changes[id]++; gamma=PerceptibleReciprocal(gamma); if (count != 0) gamma*=(double) kernel->height/count; SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma* pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(morphology_image); } if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_MorphologyPrimitive) #endif proceed=SetImageProgress(image,MorphologyTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } morphology_image->type=image->type; morphology_view=DestroyCacheView(morphology_view); image_view=DestroyCacheView(image_view); for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++) changed+=changes[j]; changes=(size_t *) RelinquishMagickMemory(changes); return(status ? (ssize_t) changed : 0); } /* Normal handling of horizontal or rectangular kernels (row by row). 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,morphology_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; ssize_t center; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width, kernel->height,exception); q=GetCacheViewAuthenticPixels(morphology_view,0,y,morphology_image->columns, 1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } center=(ssize_t) (GetPixelChannels(image)*width*offset.y+ GetPixelChannels(image)*offset.x); for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, intensity, maximum, minimum, pixel; PixelChannel channel; PixelTrait morphology_traits, traits; register const MagickRealType *magick_restrict k; register const Quantum *magick_restrict pixels; register ssize_t u; size_t count; ssize_t v; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); morphology_traits=GetPixelChannelTraits(morphology_image,channel); if ((traits == UndefinedPixelTrait) || (morphology_traits == UndefinedPixelTrait)) continue; if (((traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p+center) == 0)) { SetPixelChannel(morphology_image,channel,p[center+i],q); continue; } pixels=p; maximum=0.0; minimum=(double) QuantumRange; count=kernel->width*kernel->height; switch (method) { case ConvolveMorphology: pixel=bias; break; case DilateMorphology: case ErodeIntensityMorphology: { pixel=0.0; break; } default: { pixel=(double) p[center+i]; break; } } gamma=1.0; switch (method) { case ConvolveMorphology: { /* Weighted Average of pixels using reflected kernel For correct working of this operation for 
asymetrical kernels, the kernel needs to be applied in its reflected form. That is its values needs to be reversed. Correlation is actually the same as this but without reflecting the kernel, and thus 'lower-level' that Convolution. However as Convolution is the more common method used, and it does not really cost us much in terms of processing to use a reflected kernel, so it is Convolution that is implemented. Correlation will have its kernel reflected before calling this function to do a Convolve. For more details of Correlation vs Convolution see http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf */ k=(&kernel->values[kernel->width*kernel->height-1]); count=0; if ((morphology_traits & BlendPixelTrait) == 0) { /* No alpha blending. */ for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { pixel+=(*k)*pixels[i]; count++; } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } /* Alpha blending. */ gamma=0.0; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels)); pixel+=alpha*(*k)*pixels[i]; gamma+=alpha*(*k); count++; } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case ErodeMorphology: { /* Minimum value within kernel neighbourhood. The kernel is not reflected for this operation. In normal Greyscale Morphology, the kernel value should be added to the real value, this is currently not done, due to the nature of the boolean kernels being used. 
*/ k=kernel->values; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k) && (*k >= 0.5)) { if ((double) pixels[i] < pixel) pixel=(double) pixels[i]; } k++; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case DilateMorphology: { /* Maximum value within kernel neighbourhood. For correct working of this operation for asymetrical kernels, the kernel needs to be applied in its reflected form. That is its values needs to be reversed. In normal Greyscale Morphology, the kernel value should be added to the real value, this is currently not done, due to the nature of the boolean kernels being used. */ count=0; k=(&kernel->values[kernel->width*kernel->height-1]); for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k) && (*k > 0.5)) { if ((double) pixels[i] > pixel) pixel=(double) pixels[i]; } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case HitAndMissMorphology: case ThinningMorphology: case ThickenMorphology: { /* Minimum of foreground pixel minus maxumum of background pixels. The kernel is not reflected for this operation, and consists of both foreground and background pixel neighbourhoods, 0.0 for background, and 1.0 for foreground with either Nan or 0.5 values for don't care. This never produces a meaningless negative result. Such results cause Thinning/Thicken to not work correctly when used against a greyscale image. 
*/ count=0; k=kernel->values; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { if (*k > 0.7) { if ((double) pixels[i] < pixel) pixel=(double) pixels[i]; } else if (*k < 0.3) { if ((double) pixels[i] > maximum) maximum=(double) pixels[i]; } count++; } k++; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } pixel-=maximum; if (pixel < 0.0) pixel=0.0; if (method == ThinningMorphology) pixel=(double) p[center+i]-pixel; else if (method == ThickenMorphology) pixel+=(double) p[center+i]+pixel; break; } case ErodeIntensityMorphology: { /* Select pixel with minimum intensity within kernel neighbourhood. The kernel is not reflected for this operation. */ count=0; k=kernel->values; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k) && (*k >= 0.5)) { intensity=(double) GetPixelIntensity(image,pixels); if (intensity < minimum) { pixel=(double) pixels[i]; minimum=intensity; } count++; } k++; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case DilateIntensityMorphology: { /* Select pixel with maximum intensity within kernel neighbourhood. The kernel is not reflected for this operation. */ count=0; k=(&kernel->values[kernel->width*kernel->height-1]); for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k) && (*k >= 0.5)) { intensity=(double) GetPixelIntensity(image,pixels); if (intensity > maximum) { pixel=(double) pixels[i]; maximum=intensity; } count++; } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case IterativeDistanceMorphology: { /* Compute th iterative distance from black edge of a white image shape. Essentually white values are decreased to the smallest 'distance from edge' it can find. 
It works by adding kernel values to the neighbourhood, and and select the minimum value found. The kernel is rotated before use, so kernel distances match resulting distances, when a user provided asymmetric kernel is applied. This code is nearly identical to True GrayScale Morphology but not quite. GreyDilate Kernel values added, maximum value found Kernel is rotated before use. GrayErode: Kernel values subtracted and minimum value found No kernel rotation used. Note the the Iterative Distance method is essentially a GrayErode, but with negative kernel values, and kernel rotation applied. */ count=0; k=(&kernel->values[kernel->width*kernel->height-1]); for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { if ((pixels[i]+(*k)) < pixel) pixel=(double) pixels[i]+(*k); count++; } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } break; } case UndefinedMorphology: default: break; } if (fabs(pixel-p[center+i]) > MagickEpsilon) changes[id]++; gamma=PerceptibleReciprocal(gamma); if (count != 0) gamma*=(double) kernel->height*kernel->width/count; SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma*pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(morphology_image); } if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_MorphologyPrimitive) #endif proceed=SetImageProgress(image,MorphologyTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } morphology_view=DestroyCacheView(morphology_view); image_view=DestroyCacheView(image_view); for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++) changed+=changes[j]; changes=(size_t *) RelinquishMagickMemory(changes); return(status ? 
(ssize_t) changed : -1); } /* This is almost identical to the MorphologyPrimative() function above, but applies the primitive directly to the actual image using two passes, once in each direction, with the results of the previous (and current) row being re-used. That is after each row is 'Sync'ed' into the image, the next row makes use of those values as part of the calculation of the next row. It repeats, but going in the oppisite (bottom-up) direction. Because of this 're-use of results' this function can not make use of multi- threaded, parellel processing. */ static ssize_t MorphologyPrimitiveDirect(Image *image, const MorphologyMethod method,const KernelInfo *kernel, ExceptionInfo *exception) { CacheView *morphology_view, *image_view; MagickBooleanType status; MagickOffsetType progress; OffsetInfo offset; size_t width, changed; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(kernel != (KernelInfo *) NULL); assert(kernel->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=MagickTrue; changed=0; progress=0; switch(method) { case DistanceMorphology: case VoronoiMorphology: { /* Kernel reflected about origin. */ offset.x=(ssize_t) kernel->width-kernel->x-1; offset.y=(ssize_t) kernel->height-kernel->y-1; break; } default: { offset.x=kernel->x; offset.y=kernel->y; break; } } /* Two views into same image, do not thread. */ image_view=AcquireVirtualCacheView(image,exception); morphology_view=AcquireAuthenticCacheView(image,exception); width=image->columns+kernel->width-1; for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; ssize_t center; /* Read virtual pixels, and authentic pixels, from the same image! We read using virtual to get virtual pixel handling, but write back into the same image. 
Only top half of kernel is processed as we do a single pass downward through the image iterating the distance function as we go. */ if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width,(size_t) offset.y+1,exception); q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } center=(ssize_t) (GetPixelChannels(image)*width*offset.y+ GetPixelChannels(image)*offset.x); for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double pixel; PixelTrait traits; register const MagickRealType *magick_restrict k; register const Quantum *magick_restrict pixels; register ssize_t u; ssize_t v; traits=GetPixelChannelTraits(image,(PixelChannel) i); if (traits == UndefinedPixelTrait) continue; if (((traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p+center) == 0)) continue; pixels=p; pixel=(double) QuantumRange; switch (method) { case DistanceMorphology: { k=(&kernel->values[kernel->width*kernel->height-1]); for (v=0; v <= offset.y; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { if ((pixels[i]+(*k)) < pixel) pixel=(double) pixels[i]+(*k); } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } k=(&kernel->values[kernel->width*(kernel->y+1)-1]); pixels=q-offset.x*GetPixelChannels(image); for (u=0; u < offset.x; u++) { if (!IsNaN(*k) && ((x+u-offset.x) >= 0)) { if ((pixels[i]+(*k)) < pixel) pixel=(double) pixels[i]+(*k); } k--; pixels+=GetPixelChannels(image); } break; } case VoronoiMorphology: { k=(&kernel->values[kernel->width*kernel->height-1]); for (v=0; v < offset.y; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { if ((pixels[i]+(*k)) < pixel) pixel=(double) pixels[i]+(*k); } k--; pixels+=GetPixelChannels(image); } 
pixels+=(image->columns-1)*GetPixelChannels(image); } k=(&kernel->values[kernel->width*(kernel->y+1)-1]); pixels=q-offset.x*GetPixelChannels(image); for (u=0; u < offset.x; u++) { if (!IsNaN(*k) && ((x+u-offset.x) >= 0)) { if ((pixels[i]+(*k)) < pixel) pixel=(double) pixels[i]+(*k); } k--; pixels+=GetPixelChannels(image); } break; } default: break; } if (fabs(pixel-q[i]) > MagickEpsilon) changed++; q[i]=ClampToQuantum(pixel); } p+=GetPixelChannels(image); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,MorphologyTag,progress++,2*image->rows); if (proceed == MagickFalse) status=MagickFalse; } } morphology_view=DestroyCacheView(morphology_view); image_view=DestroyCacheView(image_view); /* Do the reverse pass through the image. */ image_view=AcquireVirtualCacheView(image,exception); morphology_view=AcquireAuthenticCacheView(image,exception); for (y=(ssize_t) image->rows-1; y >= 0; y--) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; ssize_t center; /* Read virtual pixels, and authentic pixels, from the same image. We read using virtual to get virtual pixel handling, but write back into the same image. Only the bottom half of the kernel is processed as we up the image. 
*/ if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-offset.x,y,width,(size_t) kernel->y+1,exception); q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } p+=(image->columns-1)*GetPixelChannels(image); q+=(image->columns-1)*GetPixelChannels(image); center=(ssize_t) (offset.x*GetPixelChannels(image)); for (x=(ssize_t) image->columns-1; x >= 0; x--) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double pixel; PixelTrait traits; register const MagickRealType *magick_restrict k; register const Quantum *magick_restrict pixels; register ssize_t u; ssize_t v; traits=GetPixelChannelTraits(image,(PixelChannel) i); if (traits == UndefinedPixelTrait) continue; if (((traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p+center) == 0)) continue; pixels=p; pixel=(double) QuantumRange; switch (method) { case DistanceMorphology: { k=(&kernel->values[kernel->width*(kernel->y+1)-1]); for (v=offset.y; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { if ((pixels[i]+(*k)) < pixel) pixel=(double) pixels[i]+(*k); } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } k=(&kernel->values[kernel->width*kernel->y+kernel->x-1]); pixels=q; for (u=offset.x+1; u < (ssize_t) kernel->width; u++) { pixels+=GetPixelChannels(image); if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns)) { if ((pixels[i]+(*k)) < pixel) pixel=(double) pixels[i]+(*k); } k--; } break; } case VoronoiMorphology: { k=(&kernel->values[kernel->width*(kernel->y+1)-1]); for (v=offset.y; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++) { if (!IsNaN(*k)) { if ((pixels[i]+(*k)) < pixel) pixel=(double) pixels[i]+(*k); } k--; pixels+=GetPixelChannels(image); } pixels+=(image->columns-1)*GetPixelChannels(image); } 
k=(&kernel->values[kernel->width*(kernel->y+1)-1]); pixels=q; for (u=offset.x+1; u < (ssize_t) kernel->width; u++) { pixels+=GetPixelChannels(image); if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns)) { if ((pixels[i]+(*k)) < pixel) pixel=(double) pixels[i]+(*k); } k--; } break; } default: break; } if (fabs(pixel-q[i]) > MagickEpsilon) changed++; q[i]=ClampToQuantum(pixel); } p-=GetPixelChannels(image); q-=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,MorphologyTag,progress++,2*image->rows); if (proceed == MagickFalse) status=MagickFalse; } } morphology_view=DestroyCacheView(morphology_view); image_view=DestroyCacheView(image_view); return(status ? (ssize_t) changed : -1); } /* Apply a Morphology by calling one of the above low level primitive application functions. This function handles any iteration loops, composition or re-iteration of results, and compound morphology methods that is based on multiple low-level (staged) morphology methods. Basically this provides the complex glue between the requested morphology method and raw low-level implementation (above). 
*/
/*
  MorphologyApply() applies the given morphology method, iteration count and
  kernel (possibly a multi-kernel list) to 'image' and returns a new result
  image (NULL on error; details in 'exception').  Compound methods (open,
  close, smooth, edge, ...) are decomposed into staged sequences of low-level
  primitives, and multi-kernel results are merged with 'compose' (or the
  method's default).  'bias' is the output bias for convolve/correlate.
*/
MagickPrivate Image *MorphologyApply(const Image *image,
  const MorphologyMethod method, const ssize_t iterations,
  const KernelInfo *kernel, const CompositeOperator compose,const double bias,
  ExceptionInfo *exception)
{
  CompositeOperator
    curr_compose;

  Image
    *curr_image,    /* Image we are working with or iterating */
    *work_image,    /* secondary image for primitive iteration */
    *save_image,    /* saved image - for 'edge' method only */
    *rslt_image;    /* resultant image - after multi-kernel handling */

  KernelInfo
    *reflected_kernel, /* A reflected copy of the kernel (if needed) */
    *norm_kernel,      /* the current normal un-reflected kernel */
    *rflt_kernel,      /* the current reflected kernel (if needed) */
    *this_kernel;      /* the kernel being applied */

  MorphologyMethod
    primitive;      /* the current morphology primitive being applied */

  CompositeOperator
    rslt_compose;   /* multi-kernel compose method for results to use */

  MagickBooleanType
    special,        /* do we use a direct modify function? */
    verbose;        /* verbose output of results */

  size_t
    method_loop,    /* Loop 1: number of compound method iterations (norm 1) */
    method_limit,   /* maximum number of compound method iterations */
    kernel_number,  /* Loop 2: the kernel number being applied */
    stage_loop,     /* Loop 3: primitive loop for compound morphology */
    stage_limit,    /* how many primitives are in this compound */
    kernel_loop,    /* Loop 4: iterate the kernel over image */
    kernel_limit,   /* number of times to iterate kernel */
    count,          /* total count of primitive steps applied */
    kernel_changed, /* total count of changed using iterated kernel */
    method_changed; /* total count of changed over method iteration */

  ssize_t
    changed;        /* number pixels changed by last primitive operation */

  char
    v_info[MagickPathExtent];

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);

  count = 0;      /* number of low-level morphology primitives performed */
  if ( iterations == 0 )
    return((Image *) NULL);   /* null operation - nothing to do! */

  kernel_limit = (size_t) iterations;
  if ( iterations < 0 )  /* negative iterations = infinite (well almost) */
    kernel_limit = image->columns>image->rows ? image->columns : image->rows;

  verbose = IsStringTrue(GetImageArtifact(image,"debug"));

  /* initialise for cleanup */
  curr_image = (Image *) image;
  curr_compose = image->compose;
  (void) curr_compose;   /* currently unused - kept for future use */
  work_image = save_image = rslt_image = (Image *) NULL;
  reflected_kernel = (KernelInfo *) NULL;

  /* Initialize specific methods
   * + which loop should use the given iterations
   * + how many primitives make up the compound morphology
   * + multi-kernel compose method to use (by default)
   */
  method_limit = 1;       /* just do method once, unless otherwise set */
  stage_limit = 1;        /* assume method is not a compound */
  special = MagickFalse;  /* assume it is NOT a direct modify primitive */
  rslt_compose = compose; /* and we are composing multi-kernels as given */
  switch( method ) {
    case SmoothMorphology:          /* 4 primitive compound morphology */
      stage_limit = 4;
      break;
    case OpenMorphology:            /* 2 primitive compound morphology */
    case OpenIntensityMorphology:
    case TopHatMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case EdgeMorphology:
      stage_limit = 2;
      break;
    case HitAndMissMorphology:
      rslt_compose = LightenCompositeOp; /* Union of multi-kernel results */
      /* FALL THRU */
    case ThinningMorphology:
    case ThickenMorphology:
      method_limit = kernel_limit;  /* iterate the whole method */
      kernel_limit = 1;             /* do not do kernel iteration  */
      break;
    case DistanceMorphology:
    case VoronoiMorphology:
      special = MagickTrue;         /* use special direct primitive */
      break;
    default:
      break;
  }

  /* Apply special methods with special requirements
  ** For example, single run only, or post-processing requirements
  */
  if ( special != MagickFalse )
    {
      rslt_image=CloneImage(image,0,0,MagickTrue,exception);
      if (rslt_image == (Image *) NULL)
        goto error_cleanup;
      if (SetImageStorageClass(rslt_image,DirectClass,exception) == MagickFalse)
        goto error_cleanup;
      /* direct primitives modify the result image in place */
      changed=MorphologyPrimitiveDirect(rslt_image,method,kernel,exception);

      if (verbose != MagickFalse)
        (void) (void) FormatLocaleFile(stderr,
          "%s:%.20g.%.20g #%.20g => Changed %.20g\n",
          CommandOptionToMnemonic(MagickMorphologyOptions, method),
          1.0,0.0,1.0, (double) changed);

      if ( changed < 0 )
        goto error_cleanup;

      if ( method == VoronoiMorphology ) {
        /* Preserve the alpha channel of input image - but turned it off */
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
          exception);
        (void) CompositeImage(rslt_image,image,CopyAlphaCompositeOp,
          MagickTrue,0,0,exception);
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
          exception);
      }
      goto exit_cleanup;
    }

  /* Handle user (caller) specified multi-kernel composition method */
  if ( compose != UndefinedCompositeOp )
    rslt_compose = compose;  /* override default composition for method */
  if ( rslt_compose == UndefinedCompositeOp )
    rslt_compose = NoCompositeOp; /* still not defined! Then re-iterate */

  /* Some methods require a reflected kernel to use with primitives.
   * Create the reflected kernel for those methods. */
  switch ( method ) {
    case CorrelateMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case SmoothMorphology:
      reflected_kernel = CloneKernelInfo(kernel);
      if (reflected_kernel == (KernelInfo *) NULL)
        goto error_cleanup;
      RotateKernelInfo(reflected_kernel,180);
      break;
    default:
      break;
  }

  /* Loops around more primitive morphology methods
  **  erose, dilate, open, close, smooth, edge, etc...
  */
  /* Loop 1:  iterate the compound method */
  method_loop = 0;
  method_changed = 1;
  while ( method_loop < method_limit && method_changed > 0 ) {
    method_loop++;
    method_changed = 0;

    /* Loop 2:  iterate over each kernel in a multi-kernel list */
    norm_kernel = (KernelInfo *) kernel;
    this_kernel = (KernelInfo *) kernel;
    rflt_kernel = reflected_kernel;

    kernel_number = 0;
    while ( norm_kernel != NULL ) {

      /* Loop 3: Compound Morphology Staging - Select Primitive to apply */
      stage_loop = 0;          /* the compound morphology stage number */
      while ( stage_loop < stage_limit ) {
        stage_loop++;   /* The stage of the compound morphology */

        /* Select primitive morphology for this stage of compound method */
        this_kernel = norm_kernel; /* default use unreflected kernel */
        primitive = method;        /* Assume method is a primitive */
        switch( method ) {
          case ErodeMorphology:      /* just erode */
          case EdgeInMorphology:     /* erode and image difference */
            primitive = ErodeMorphology;
            break;
          case DilateMorphology:     /* just dilate */
          case EdgeOutMorphology:    /* dilate and image difference */
            primitive = DilateMorphology;
            break;
          case OpenMorphology:       /* erode then dilate */
          case TopHatMorphology:     /* open and image difference */
            primitive = ErodeMorphology;
            if ( stage_loop == 2 )
              primitive = DilateMorphology;
            break;
          case OpenIntensityMorphology:
            primitive = ErodeIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = DilateIntensityMorphology;
            break;
          case CloseMorphology:      /* dilate, then erode */
          case BottomHatMorphology:  /* close and image difference */
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = DilateMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeMorphology;
            break;
          case CloseIntensityMorphology:
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = DilateIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeIntensityMorphology;
            break;
          case SmoothMorphology:     /* open, close */
            switch ( stage_loop ) {
              case 1: /* start an open method, which starts with Erode */
                primitive = ErodeMorphology;
                break;
              case 2: /* now Dilate the Erode */
                primitive = DilateMorphology;
                break;
              case 3: /* Reflect kernel a close */
                this_kernel = rflt_kernel; /* use the reflected kernel */
                primitive = DilateMorphology;
                break;
              case 4: /* Finish the Close */
                this_kernel = rflt_kernel; /* use the reflected kernel */
                primitive = ErodeMorphology;
                break;
            }
            break;
          case EdgeMorphology:       /* dilate and erode difference */
            primitive = DilateMorphology;
            if ( stage_loop == 2 ) {
              save_image = curr_image;      /* save the image difference */
              curr_image = (Image *) image; /* erode starts from the input */
              primitive = ErodeMorphology;
            }
            break;
          case CorrelateMorphology:
            /* A Correlation is a Convolution with a reflected kernel.
            ** However a Convolution is a weighted sum using a reflected
            ** kernel.  It may seem strange to convert a Correlation into a
            ** Convolution as the Correlation is the simpler method, but
            ** Convolution is much more commonly used, and it makes sense to
            ** implement it directly so as to avoid the need to duplicate the
            ** kernel when it is not required (which is typically the
            ** default).
            */
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = ConvolveMorphology;
            break;
          default:
            break;
        }
        assert( this_kernel != (KernelInfo *) NULL );

        /* Extra information for debugging compound operations */
        if (verbose != MagickFalse) {
          if ( stage_limit > 1 )
            (void) FormatLocaleString(v_info,MagickPathExtent,"%s:%.20g.%.20g -> ",
             CommandOptionToMnemonic(MagickMorphologyOptions,method),(double)
             method_loop,(double) stage_loop);
          else if ( primitive != method )
            (void) FormatLocaleString(v_info, MagickPathExtent, "%s:%.20g -> ",
             CommandOptionToMnemonic(MagickMorphologyOptions, method),(double)
             method_loop);
          else
            v_info[0] = '\0';
        }

        /* Loop 4: Iterate the kernel with primitive */
        kernel_loop = 0;
        kernel_changed = 0;
        changed = 1;
        while ( kernel_loop < kernel_limit && changed > 0 ) {
          kernel_loop++;     /* the iteration of this kernel */

          /* Create a clone as the destination image, if not yet defined */
          if ( work_image == (Image *) NULL )
            {
              work_image=CloneImage(image,0,0,MagickTrue,exception);
              if (work_image == (Image *) NULL)
                goto error_cleanup;
              if (SetImageStorageClass(work_image,DirectClass,exception) == MagickFalse)
                goto error_cleanup;
            }

          /* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
          count++;
          changed = MorphologyPrimitive(curr_image, work_image, primitive,
                       this_kernel, bias, exception);
          if (verbose != MagickFalse) {
            if ( kernel_loop > 1 )
              (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line from previous */
            (void) (void) FormatLocaleFile(stderr,
              "%s%s%s:%.20g.%.20g #%.20g => Changed %.20g",
              v_info,CommandOptionToMnemonic(MagickMorphologyOptions,
              primitive),(this_kernel == rflt_kernel ) ? "*" : "",
              (double) (method_loop+kernel_loop-1),(double) kernel_number,
              (double) count,(double) changed);
          }
          if ( changed < 0 )
            goto error_cleanup;
          kernel_changed += changed;
          method_changed += changed;

          /* prepare next loop */
          { Image *tmp = work_image;   /* swap images for iteration */
            work_image = curr_image;
            curr_image = tmp;
          }
          /* never re-use the caller's input image as a scratch buffer */
          if ( work_image == image )
            work_image = (Image *) NULL; /* replace input 'image' */

        } /* End Loop 4: Iterate the kernel with primitive */

        if (verbose != MagickFalse && kernel_changed != (size_t)changed)
          (void) FormatLocaleFile(stderr, " Total %.20g",(double) kernel_changed);
        if (verbose != MagickFalse && stage_loop < stage_limit)
          (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line before looping */

#if 0
    (void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image);
    (void) FormatLocaleFile(stderr, " curr =0x%lx\n", (unsigned long)curr_image);
    (void) FormatLocaleFile(stderr, " work =0x%lx\n", (unsigned long)work_image);
    (void) FormatLocaleFile(stderr, " save =0x%lx\n", (unsigned long)save_image);
    (void) FormatLocaleFile(stderr, " union=0x%lx\n", (unsigned long)rslt_image);
#endif

      } /* End Loop 3: Primitive (staging) Loop for Compound Methods */

      /*  Final Post-processing for some Compound Methods
      **
      ** The removal of any 'Sync' channel flag in the Image Composition
      ** below ensures the mathematical compose method is applied in a
      ** purely mathematical way, and only to the selected channels.
      ** Turn off SVG composition 'alpha blending'.
      */
      switch( method ) {
        case EdgeOutMorphology:
        case EdgeInMorphology:
        case TopHatMorphology:
        case BottomHatMorphology:
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr,
              "\n%s: Difference with original image",CommandOptionToMnemonic(
              MagickMorphologyOptions, method) );
          (void) CompositeImage(curr_image,image,DifferenceCompositeOp,
            MagickTrue,0,0,exception);
          break;
        case EdgeMorphology:
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr,
              "\n%s: Difference of Dilate and Erode",CommandOptionToMnemonic(
              MagickMorphologyOptions, method) );
          (void) CompositeImage(curr_image,save_image,DifferenceCompositeOp,
            MagickTrue,0,0,exception);
          save_image = DestroyImage(save_image); /* finished with save image */
          break;
        default:
          break;
      }

      /* multi-kernel handling:  re-iterate, or compose results */
      if ( kernel->next == (KernelInfo *) NULL )
        rslt_image = curr_image;   /* just return the resulting image */
      else if ( rslt_compose == NoCompositeOp )
        { if (verbose != MagickFalse) {
            if ( this_kernel->next != (KernelInfo *) NULL )
              (void) FormatLocaleFile(stderr, " (re-iterate)");
            else
              (void) FormatLocaleFile(stderr, " (done)");
          }
          rslt_image = curr_image; /* return result, and re-iterate */
        }
      else if ( rslt_image == (Image *) NULL)
        { if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr, " (save for compose)");
          rslt_image = curr_image;
          curr_image = (Image *) image;  /* continue with original image */
        }
      else
        { /* Add the new 'current' result to the composition
          **
          ** The removal of any 'Sync' channel flag in the Image Composition
          ** below ensures the mathematical compose method is applied in a
          ** purely mathematical way, and only to the selected channels.
          ** IE: Turn off SVG composition 'alpha blending'.
          */
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr, " (compose \"%s\")",
              CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) );
          (void) CompositeImage(rslt_image,curr_image,rslt_compose,MagickTrue,
            0,0,exception);
          curr_image = DestroyImage(curr_image);
          curr_image = (Image *) image;  /* continue with original image */
        }
      if (verbose != MagickFalse)
        (void) FormatLocaleFile(stderr, "\n");

      /* loop to the next kernel in a multi-kernel list */
      norm_kernel = norm_kernel->next;
      if ( rflt_kernel != (KernelInfo *) NULL )
        rflt_kernel = rflt_kernel->next;
      kernel_number++;
    } /* End Loop 2: Loop over each kernel */

  } /* End Loop 1: compound method iteration */

  goto exit_cleanup;

  /* Yes goto's are bad, but it makes cleanup lot more efficient */
error_cleanup:
  if ( curr_image == rslt_image )
    curr_image = (Image *) NULL;
  if ( rslt_image != (Image *) NULL )
    rslt_image = DestroyImage(rslt_image);
exit_cleanup:
  if ( curr_image == rslt_image || curr_image == image )
    curr_image = (Image *) NULL;
  if ( curr_image != (Image *) NULL )
    curr_image = DestroyImage(curr_image);
  if ( work_image != (Image *) NULL )
    work_image = DestroyImage(work_image);
  if ( save_image != (Image *) NULL )
    save_image = DestroyImage(save_image);
  if ( reflected_kernel != (KernelInfo *) NULL )
    reflected_kernel = DestroyKernelInfo(reflected_kernel);
  return(rslt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o r p h o l o g y I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MorphologyImage() applies a user supplied kernel to the image according to
%  the given morphology method.
%
%  This function applies any and all user defined settings before calling
%  the above internal function MorphologyApply().
%
%  User defined settings include...
%    * Output Bias for Convolution and correlation ("-define convolve:bias=??")
%    * Kernel Scale/normalize settings ("-define convolve:scale=??")
%      This can also includes the addition of a scaled unity kernel.
%    * Show Kernel being applied ("-define morphology:showkernel=1")
%
%  Other operators that do not want user supplied options interfering,
%  especially "convolve:bias" and "morphology:showkernel" should use
%  MorphologyApply() directly.
%
%  The format of the MorphologyImage method is:
%
%      Image *MorphologyImage(const Image *image,MorphologyMethod method,
%        const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o method: the morphology method to be applied.
%
%    o iterations: apply the operation this many times (or no change).
%      A value of -1 means loop until no change found.
%      How this is applied may depend on the morphology method.
%      Typically this is a value of 1.
%
%    o kernel: An array of double representing the morphology kernel.
%      Warning: kernel may be normalized for the Convolve method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphologyImage(const Image *image,
  const MorphologyMethod method,const ssize_t iterations,
  const KernelInfo *kernel,ExceptionInfo *exception)
{
  const char
    *artifact;

  CompositeOperator
    compose;

  double
    bias;

  Image
    *morphology_image;

  KernelInfo
    *curr_kernel;

  curr_kernel = (KernelInfo *) kernel;
  bias=0.0;
  compose = UndefinedCompositeOp;  /* use default for method */

  /* Apply Convolve/Correlate Normalization and Scaling Factors.
   * This is done BEFORE the ShowKernelInfo() function is called so that
   * users can see the results of the 'option:convolve:scale' option.
   */
  if ( method == ConvolveMorphology || method == CorrelateMorphology ) {
      /* Get the bias value as it will be needed */
      artifact = GetImageArtifact(image,"convolve:bias");
      if ( artifact != (const char *) NULL) {
          if (IsGeometry(artifact) == MagickFalse)
            (void) ThrowMagickException(exception,GetMagickModule(),
              OptionWarning,"InvalidSetting","'%s' '%s'",
              "convolve:bias",artifact);
          else
            bias=StringToDoubleInterval(artifact,(double) QuantumRange+1.0);
        }

      /* Scale kernel according to user wishes */
      artifact = GetImageArtifact(image,"convolve:scale");
      if ( artifact != (const char *) NULL ) {
          if (IsGeometry(artifact) == MagickFalse)
            (void) ThrowMagickException(exception,GetMagickModule(),
              OptionWarning,"InvalidSetting","'%s' '%s'",
              "convolve:scale",artifact);
          else {
              /* clone before modifying - caller's kernel is const */
              if ( curr_kernel == kernel )
                curr_kernel = CloneKernelInfo(kernel);
              if (curr_kernel == (KernelInfo *) NULL)
                return((Image *) NULL);
              ScaleGeometryKernelInfo(curr_kernel, artifact);
            }
        }
    }

  /* display the (normalized) kernel via stderr */
  artifact=GetImageArtifact(image,"morphology:showkernel");
  if (IsStringTrue(artifact) != MagickFalse)
    ShowKernelInfo(curr_kernel);

  /* Override the default handling of multi-kernel morphology results
   * If 'Undefined' use the default method
   * If 'None' (default for 'Convolve') re-iterate previous result
   * Otherwise merge resulting images using compose method given.
   * Default for 'HitAndMiss' is 'Lighten'.
   */
  { ssize_t
      parse;

    artifact = GetImageArtifact(image,"morphology:compose");
    if ( artifact != (const char *) NULL) {
      parse=ParseCommandOption(MagickComposeOptions,
        MagickFalse,artifact);
      if ( parse < 0 )
        (void) ThrowMagickException(exception,GetMagickModule(),
          OptionWarning,"UnrecognizedComposeOperator","'%s' '%s'",
          "morphology:compose",artifact);
      else
        compose=(CompositeOperator)parse;
    }
  }
  /* Apply the Morphology */
  morphology_image = MorphologyApply(image,method,iterations,
    curr_kernel,compose,bias,exception);

  /* Cleanup and Exit */
  if ( curr_kernel != kernel )
    curr_kernel=DestroyKernelInfo(curr_kernel);
  return(morphology_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     R o t a t e K e r n e l I n f o                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RotateKernelInfo() rotates the kernel by the angle given.
%
%  Currently it is restricted to 90 degree angles, of either 1D kernels
%  or square kernels. And 'circular' rotations of 45 degrees for 3x3 kernels.
%  It will ignore useless rotations for specific 'named' built-in kernels.
%
%  The format of the RotateKernelInfo method is:
%
%      void RotateKernelInfo(KernelInfo *kernel, double angle)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
%    o angle: angle to rotate in degrees
%
% This function is currently internal to this module only, but can be exported
% to other modules if needed.
*/
static void RotateKernelInfo(KernelInfo *kernel, double angle)
{
  /* angle the lower kernels first */
  if ( kernel->next != (KernelInfo *) NULL)
    RotateKernelInfo(kernel->next, angle);

  /* WARNING: Currently assumes the kernel (rightly) is horizontally symmetrical
  **
  ** TODO: expand beyond simple 90 degree rotates, flips and flops
  */

  /* Modulus the angle */
  angle = fmod(angle, 360.0);
  if ( angle < 0 )
    angle += 360.0;

  if ( 337.5 < angle || angle <= 22.5 )
    return;   /* Near zero angle - no change! - At least not at this time */

  /* Handle special cases */
  switch (kernel->type) {
    /* These built-in kernels are cylindrical kernels, rotating is useless */
    case GaussianKernel:
    case DoGKernel:
    case LoGKernel:
    case DiskKernel:
    case PeaksKernel:
    case LaplacianKernel:
    case ChebyshevKernel:
    case ManhattanKernel:
    case EuclideanKernel:
      return;

    /* These may be rotatable at non-90 angles in the future */
    /* but simply rotating them in multiples of 90 degrees is useless */
    case SquareKernel:
    case DiamondKernel:
    case PlusKernel:
    case CrossKernel:
      return;

    /* These only allows a +/-90 degree rotation (by transpose) */
    /* A 180 degree rotation is useless */
    case BlurKernel:
      if ( 135.0 < angle && angle <= 225.0 )
        return;
      if ( 225.0 < angle && angle <= 315.0 )
        angle -= 180;
      break;
    default:
      break;
  }
  /* Attempt rotations by 45 degrees  -- 3x3 kernels only */
  if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 )
    {
      if ( kernel->width == 3 && kernel->height == 3 )
        { /* Rotate a 3x3 square by 45 degree angle */
          double t  = kernel->values[0];
          kernel->values[0] = kernel->values[3];
          kernel->values[3] = kernel->values[6];
          kernel->values[6] = kernel->values[7];
          kernel->values[7] = kernel->values[8];
          kernel->values[8] = kernel->values[5];
          kernel->values[5] = kernel->values[2];
          kernel->values[2] = kernel->values[1];
          kernel->values[1] = t;
          /* rotate non-centered origin */
          if ( kernel->x != 1 || kernel->y != 1 ) {
            ssize_t x,y;
            x = (ssize_t) kernel->x-1;
            y = (ssize_t) kernel->y-1;
            /* step the origin one position around the 3x3 ring */
                 if ( x == y  ) x = 0;
            else if ( x == 0  ) x = -y;
            else if ( x == -y ) y = 0;
            else if ( y == 0  ) y = x;
            kernel->x = (ssize_t) x+1;
            kernel->y = (ssize_t) y+1;
          }
          angle = fmod(angle+315.0, 360.0);  /* angle reduced 45 degrees */
          kernel->angle = fmod(kernel->angle+45.0, 360.0);
        }
      else
        perror("Unable to rotate non-3x3 kernel by 45 degrees");
    }
  if ( 45.0 < fmod(angle, 180.0)  && fmod(angle,180.0) <= 135.0 )
    {
      if ( kernel->width == 1 || kernel->height == 1 )
        { /* Do a transpose of a 1 dimensional kernel,
          ** which results in a fast 90 degree rotation of some type.
          */
          ssize_t
            t;
          t = (ssize_t) kernel->width;
          kernel->width = kernel->height;
          kernel->height = (size_t) t;
          t = kernel->x;
          kernel->x = kernel->y;
          kernel->y = t;
          if ( kernel->width == 1 ) {
            angle = fmod(angle+270.0, 360.0);     /* angle reduced 90 degrees */
            kernel->angle = fmod(kernel->angle+90.0, 360.0);
          } else {
            angle = fmod(angle+90.0, 360.0);   /* angle increased 90 degrees */
            kernel->angle = fmod(kernel->angle+270.0, 360.0);
          }
        }
      else if ( kernel->width == kernel->height )
        { /* Rotate a square array of values by 90 degrees */
          { register ssize_t
              i,j,x,y;

            register MagickRealType
              *k,t;

            /* four-way cyclic exchange of ring elements, in place */
            k=kernel->values;
            for( i=0, x=(ssize_t) kernel->width-1;  i<=x;   i++, x--)
              for( j=0, y=(ssize_t) kernel->height-1;  j<y;  j++, y--)
                { t                    = k[i+j*kernel->width];
                  k[i+j*kernel->width] = k[j+x*kernel->width];
                  k[j+x*kernel->width] = k[x+y*kernel->width];
                  k[x+y*kernel->width] = k[y+i*kernel->width];
                  k[y+i*kernel->width] = t;
                }
          }
          /* rotate the origin - relative to center of array */
          { register ssize_t x,y;
            x = (ssize_t) (kernel->x*2-kernel->width+1);
            y = (ssize_t) (kernel->y*2-kernel->height+1);
            kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
            kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
          }
          angle = fmod(angle+270.0, 360.0);     /* angle reduced 90 degrees */
          kernel->angle = fmod(kernel->angle+90.0, 360.0);
        }
      else
        perror("Unable to rotate a non-square, non-linear kernel 90 degrees");
    }
  if ( 135.0 < angle && angle <= 225.0 )
    {
      /* For a 180 degree rotation - also known as a reflection
       * This is actually a very very common operation!
       * Basically all that is needed is a reversal of the kernel data!
       * And a reflection of the origin
       */
      MagickRealType
        t;

      register MagickRealType
        *k;

      ssize_t
        i, j;

      k=kernel->values;
      j=(ssize_t) (kernel->width*kernel->height-1);
      for (i=0;  i < j;  i++, j--)
        t=k[i],  k[i]=k[j],  k[j]=t;

      kernel->x = (ssize_t) kernel->width  - kernel->x - 1;
      kernel->y = (ssize_t) kernel->height - kernel->y - 1;
      angle = fmod(angle-180.0, 360.0);  /* angle+180 degrees */
      kernel->angle = fmod(kernel->angle+180.0, 360.0);
    }
  /* At this point angle should at least between -45 (315) and +45 degrees
   * In the future some form of non-orthogonal angled rotates could be
   * performed here, possibly with a linear kernel restriction.
   */

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S c a l e G e o m e t r y K e r n e l I n f o                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ScaleGeometryKernelInfo() takes a geometry argument string, typically
%  provided as a "-set option:convolve:scale {geometry}" user setting,
%  and modifies the kernel according to the parsed arguments of that setting.
%
%  The first argument (and any normalization flags) are passed to
%  ScaleKernelInfo() to scale/normalize the kernel.  The second argument
%  is then passed to UnityAddKernelInfo() to add a scaled unity kernel
%  into the scaled/normalized kernel.
%
%  The format of the ScaleGeometryKernelInfo method is:
%
%      void ScaleGeometryKernelInfo(KernelInfo *kernel,
%        const double scaling_factor,const MagickStatusType normalize_flags)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel to modify
%
%    o geometry:
%      The geometry string to parse, typically from the user provided
%      "-set option:convolve:scale {geometry}" setting.
%
*/
MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel,
  const char *geometry)
{
  GeometryInfo
    geometry_info;

  MagickStatusType
    parse_flags;

  /* Parse the user's geometry string into rho (scale) and sigma (unity
     blend) components, with flags recording which parts were present. */
  SetGeometryInfo(&geometry_info);
  parse_flags = ParseGeometry(geometry, &geometry_info);

  /* A '%' suffix means both arguments were given as percentages */
  if ( (parse_flags & PercentValue) != 0 )
    {
      geometry_info.rho *= 0.01;
      geometry_info.sigma *= 0.01;
    }

  /* Supply defaults for any argument the user omitted */
  if ( (parse_flags & RhoValue) == 0 )
    geometry_info.rho = 1.0;
  if ( (parse_flags & SigmaValue) == 0 )
    geometry_info.sigma = 0.0;

  /* Scale/Normalize the input kernel (flags select the normalization) */
  ScaleKernelInfo(kernel, geometry_info.rho, (GeometryFlags) parse_flags);

  /* Add Unity Kernel, for blending with original image */
  if ( (parse_flags & SigmaValue) != 0 )
    UnityAddKernelInfo(kernel, geometry_info.sigma);

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S c a l e K e r n e l I n f o                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ScaleKernelInfo() scales the given kernel list by the given amount, with or
%  without normalization of the sum of the kernel values (as per given flags).
%
%  By default (no flags given) the values within the kernel is scaled
%  directly using given scaling factor without change.
%
%  If either of the two 'normalize_flags' are given the kernel will first be
%  normalized and then further scaled by the scaling factor value given.
%
%  Kernel normalization ('normalize_flags' given) is designed to ensure that
%  any use of the kernel scaling factor with 'Convolve' or 'Correlate'
%  morphology methods will fall into -1.0 to +1.0 range.  Note that for
%  non-HDRI versions of IM this may cause images to have any negative results
%  clipped, unless some 'bias' is used.
%
%  More specifically.  Kernels which only contain positive values (such as a
%  'Gaussian' kernel) will be scaled so that those values sum to +1.0,
%  ensuring a 0.0 to +1.0 output range for non-HDRI images.
%
%  For Kernels that contain some negative values, (such as 'Sharpen' kernels)
%  the kernel will be scaled by the absolute of the sum of kernel values, so
%  that it will generally fall within the +/- 1.0 range.
%
%  For kernels whose values sum to zero, (such as 'Laplacian' kernels) kernel
%  will be scaled by just the sum of the positive values, so that its output
%  range will again fall into the  +/- 1.0 range.
%
%  For special kernels designed for locating shapes using 'Correlate', (often
%  only containing +1 and -1 values, representing foreground/background
%  matching) a special normalization method is provided to scale the positive
%  values separately to those of the negative values, so the kernel will be
%  forced to become a zero-sum kernel better suited to such searches.
%
%  WARNING: Correct normalization of the kernel assumes that the '*_range'
%  attributes within the kernel structure have been correctly set during the
%  kernels creation.
%
%  NOTE: The values used for 'normalize_flags' have been selected specifically
%  to match the use of geometry options, so that '!' means NormalizeValue, '^'
%  means CorrelateNormalizeValue.  All other GeometryFlags values are ignored.
%
%  The format of the ScaleKernelInfo method is:
%
%      void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
%               const MagickStatusType normalize_flags )
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
%    o scaling_factor:
%             multiply all values (after normalization) by this factor if not
%             zero.  If the kernel is normalized regardless of any flags.
%
%    o normalize_flags:
%             GeometryFlags defining normalization method to use.
% specifically: NormalizeValue, CorrelateNormalizeValue, % and/or PercentValue % */ MagickExport void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,const GeometryFlags normalize_flags) { register double pos_scale, neg_scale; register ssize_t i; /* do the other kernels in a multi-kernel list first */ if ( kernel->next != (KernelInfo *) NULL) ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags); /* Normalization of Kernel */ pos_scale = 1.0; if ( (normalize_flags&NormalizeValue) != 0 ) { if ( fabs(kernel->positive_range + kernel->negative_range) >= MagickEpsilon ) /* non-zero-summing kernel (generally positive) */ pos_scale = fabs(kernel->positive_range + kernel->negative_range); else /* zero-summing kernel */ pos_scale = kernel->positive_range; } /* Force kernel into a normalized zero-summing kernel */ if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) { pos_scale = ( fabs(kernel->positive_range) >= MagickEpsilon ) ? kernel->positive_range : 1.0; neg_scale = ( fabs(kernel->negative_range) >= MagickEpsilon ) ? -kernel->negative_range : 1.0; } else neg_scale = pos_scale; /* finialize scaling_factor for positive and negative components */ pos_scale = scaling_factor/pos_scale; neg_scale = scaling_factor/neg_scale; for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++) if (!IsNaN(kernel->values[i])) kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale; /* convolution output range */ kernel->positive_range *= pos_scale; kernel->negative_range *= neg_scale; /* maximum and minimum values in kernel */ kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale; kernel->minimum *= (kernel->minimum >= 0.0) ? 
pos_scale : neg_scale; /* swap kernel settings if user's scaling factor is negative */ if ( scaling_factor < MagickEpsilon ) { double t; t = kernel->positive_range; kernel->positive_range = kernel->negative_range; kernel->negative_range = t; t = kernel->maximum; kernel->maximum = kernel->minimum; kernel->minimum = 1; } return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h o w K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShowKernelInfo() outputs the details of the given kernel defination to % standard error, generally due to a users 'morphology:showkernel' option % request. % % The format of the ShowKernel method is: % % void ShowKernelInfo(const KernelInfo *kernel) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % */ MagickPrivate void ShowKernelInfo(const KernelInfo *kernel) { const KernelInfo *k; size_t c, i, u, v; for (c=0, k=kernel; k != (KernelInfo *) NULL; c++, k=k->next ) { (void) FormatLocaleFile(stderr, "Kernel"); if ( kernel->next != (KernelInfo *) NULL ) (void) FormatLocaleFile(stderr, " #%lu", (unsigned long) c ); (void) FormatLocaleFile(stderr, " \"%s", CommandOptionToMnemonic(MagickKernelOptions, k->type) ); if ( fabs(k->angle) >= MagickEpsilon ) (void) FormatLocaleFile(stderr, "@%lg", k->angle); (void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long) k->width,(unsigned long) k->height,(long) k->x,(long) k->y); (void) FormatLocaleFile(stderr, " with values from %.*lg to %.*lg\n", GetMagickPrecision(), k->minimum, GetMagickPrecision(), k->maximum); (void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg", GetMagickPrecision(), k->negative_range, GetMagickPrecision(), k->positive_range); if ( fabs(k->positive_range+k->negative_range) < MagickEpsilon ) (void) FormatLocaleFile(stderr, " (Zero-Summing)\n"); else if ( 
fabs(k->positive_range+k->negative_range-1.0) < MagickEpsilon ) (void) FormatLocaleFile(stderr, " (Normalized)\n"); else (void) FormatLocaleFile(stderr, " (Sum %.*lg)\n", GetMagickPrecision(), k->positive_range+k->negative_range); for (i=v=0; v < k->height; v++) { (void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) v ); for (u=0; u < k->width; u++, i++) if (IsNaN(k->values[i])) (void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan"); else (void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3, GetMagickPrecision(), (double) k->values[i]); (void) FormatLocaleFile(stderr,"\n"); } } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n i t y A d d K e r n a l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnityAddKernelInfo() Adds a given amount of the 'Unity' Convolution Kernel % to the given pre-scaled and normalized Kernel. This in effect adds that % amount of the original image into the resulting convolution kernel. This % value is usually provided by the user as a percentage value in the % 'convolve:scale' setting. % % The resulting effect is to convert the defined kernels into blended % soft-blurs, unsharp kernels or into sharpening kernels. % % The format of the UnityAdditionKernelInfo method is: % % void UnityAdditionKernelInfo(KernelInfo *kernel, const double scale ) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % % o scale: % scaling factor for the unity kernel to be added to % the given kernel. 
%
*/
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
  const double scale)
{
  KernelInfo
    *k;

  /* Walk the multi-kernel list iteratively (rather than recursively) and
     blend a scaled 'Unity' (identity) kernel into each kernel by adding
     'scale' to the value at the kernel's origin.  The kernel meta-data
     (ranges, minimum, maximum) is then recalculated to match. */
  for (k = kernel; k != (KernelInfo *) NULL; k = k->next)
    {
      k->values[k->x+k->y*k->width] += scale;
      CalcKernelMetaData(k);  /* recalculate the meta-data */
    }

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     Z e r o K e r n e l N a n s                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZeroKernelNans() replaces any special 'nan' value that may be present in
%  the kernel with a zero value.  This is typically done when the kernel will
%  be used in special hardware (GPU) convolution processors, to simplify
%  matters.
%
%  The format of the ZeroKernelNans method is:
%
%      void ZeroKernelNans (KernelInfo *kernel)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
*/
MagickPrivate void ZeroKernelNans(KernelInfo *kernel)
{
  register size_t
    i;

  KernelInfo
    *k;

  /* Iterate over every kernel in the multi-kernel list (the original used
     recursion) and zero out any 'nan' entries in its value table. */
  for (k = kernel; k != (KernelInfo *) NULL; k = k->next)
    for (i=0; i < (k->width*k->height); i++)
      if (IsNaN(k->values[i]))
        k->values[i]=0.0;

  return;
}
/* ==================== region_layer.c ==================== */
#include "region_layer.h"
#include "activations.h"
#include "blas.h"
#include "box.h"
#include "cuda.h"
#include "utils.h"
#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <stdlib.h>

/* When non-zero, anchor box sizes (biases) are interpreted in grid-cell
   units and divided by the feature-map width/height to get relative sizes. */
#define DOABS 1

/* Allocate and initialize a region (detection) layer.
   batch    - mini-batch size
   w, h     - feature-map width/height (grid size)
   n        - number of anchor boxes per cell
   classes  - number of object classes
   coords   - number of box coordinates (4: x,y,w,h)
   max_boxes- maximum ground-truth boxes per image.
   NOTE(review): calloc results are not checked for NULL. */
region_layer make_region_layer(int batch, int w, int h, int n, int classes, int coords, int max_boxes)
{
    region_layer l = {0};
    l.type = REGION;

    l.n = n;
    l.batch = batch;
    l.h = h;
    l.w = w;
    l.classes = classes;
    l.coords = coords;
    l.cost = calloc(1, sizeof(float));
    l.biases = calloc(n*2, sizeof(float));           /* anchor (w,h) pairs */
    l.bias_updates = calloc(n*2, sizeof(float));
    l.outputs = h*w*n*(classes + coords + 1);        /* +1 for objectness */
    l.inputs = l.outputs;
    l.max_boxes = max_boxes;
    l.truths = max_boxes*(5);                        /* x,y,w,h,class per truth */
    l.delta = calloc(batch*l.outputs, sizeof(float));
    l.output = calloc(batch*l.outputs, sizeof(float));
    int i;
    for(i = 0; i < n*2; ++i){
        l.biases[i] = .5;                            /* default anchor size */
    }

    l.forward = forward_region_layer;
    l.backward = backward_region_layer;
#ifdef GPU
    l.forward_gpu = forward_region_layer_gpu;
    l.backward_gpu = backward_region_layer_gpu;
    l.output_gpu = cuda_make_array(l.output, batch*l.outputs);
    l.delta_gpu = cuda_make_array(l.delta, batch*l.outputs);
#endif

    fprintf(stderr, "detection\n");
    srand(0);

    return l;
}

/* Resize the layer's buffers for a new feature-map size (network resize).
   NOTE(review): realloc results are assigned directly to l->output/l->delta;
   on allocation failure the old pointers leak and NULL is stored. */
void resize_region_layer(layer *l, int w, int h)
{
    int old_w = l->w;
    int old_h = l->h;
    l->w = w;
    l->h = h;

    l->outputs = h*w*l->n*(l->classes + l->coords + 1);
    l->inputs = l->outputs;

    l->output = realloc(l->output, l->batch*l->outputs*sizeof(float));
    l->delta = realloc(l->delta, l->batch*l->outputs*sizeof(float));

#ifdef GPU
    /* GPU buffers only need reallocating when they must grow. */
    if (old_w < w || old_h < h) {
        cuda_free(l->delta_gpu);
        cuda_free(l->output_gpu);

        l->delta_gpu = cuda_make_array(l->delta, l->batch*l->outputs);
        l->output_gpu = cuda_make_array(l->output, l->batch*l->outputs);
    }
#endif
}

/* Decode one predicted box from raw network outputs:
   x,y are logistic-activated offsets within cell (i,j), normalized by grid
   size; w,h are exp() of the raw values scaled by the anchor biases
   (and, with DOABS, divided by the grid size). */
box get_region_box(float *x, float *biases, int n, int index, int i, int j, int w, int h)
{
    box b;
    b.x = (i + logistic_activate(x[index + 0])) / w;
    b.y = (j + logistic_activate(x[index + 1])) / h;
    b.w = exp(x[index + 2]) * biases[2*n];
    b.h = exp(x[index + 3]) * biases[2*n+1];
    if(DOABS){
        b.w = exp(x[index + 2]) * biases[2*n] / w;
        b.h = exp(x[index + 3]) * biases[2*n+1] / h;
    }
    return b;
}

/* Write the coordinate gradient for one predicted box toward TRUTH into
   DELTA (scaled by SCALE) and return the IOU of the current prediction. */
float delta_region_box(box truth, float *x, float *biases, int n, int index, int i, int j, int w, int h, float *delta, float scale)
{
    box pred = get_region_box(x, biases, n, index, i, j, w, h);
    float iou = box_iou(pred, truth);

    /* Targets in the raw (pre-activation) output space. */
    float tx = (truth.x*w - i);
    float ty = (truth.y*h - j);
    float tw = log(truth.w / biases[2*n]);
    float th = log(truth.h / biases[2*n + 1]);
    if(DOABS){
        tw = log(truth.w*w / biases[2*n]);
        th = log(truth.h*h / biases[2*n + 1]);
    }

    delta[index + 0] = scale * (tx - logistic_activate(x[index + 0])) * logistic_gradient(logistic_activate(x[index + 0]));
    delta[index + 1] = scale * (ty - logistic_activate(x[index + 1])) * logistic_gradient(logistic_activate(x[index + 1]));
    delta[index + 2] = scale * (tw - x[index + 2]);
    delta[index + 3] = scale * (th - x[index + 3]);
    return iou;
}

/* Write class-probability gradients at OUTPUT[index..] into DELTA.
   With a softmax tree (HIER) only the groups along the class's ancestor
   chain are updated; otherwise a flat one-vs-all target is used, optionally
   reweighted by the focal-loss gradient. *avg_cat accumulates the predicted
   probability of the correct class for logging. */
void delta_region_class(float *output, float *delta, int index, int class_id, int classes, tree *hier, float scale, float *avg_cat, int focal_loss)
{
    int i, n;
    if(hier){
        float pred = 1;
        while(class_id >= 0){
            pred *= output[index + class_id];
            int g = hier->group[class_id];
            int offset = hier->group_offset[g];
            /* Zero-target every sibling in this group... */
            for(i = 0; i < hier->group_size[g]; ++i){
                delta[index + offset + i] = scale * (0 - output[index + offset + i]);
            }
            /* ...then one-target the correct node, and climb to its parent. */
            delta[index + class_id] = scale * (1 - output[index + class_id]);

            class_id = hier->parent[class_id];
        }
        *avg_cat += pred;
    } else {
        // Focal loss
        if (focal_loss) {
            // Focal Loss
            float alpha = 0.5;    // 0.25 or 0.5
            //float gamma = 2;    // hardcoded in many places of the grad-formula

            int ti = index + class_id;
            /* epsilon keeps logf() finite when the prediction is exactly 0 */
            float pt = output[ti] + 0.000000000000001F;
            // http://fooplot.com/#W3sidHlwZSI6MCwiZXEiOiItKDEteCkqKDIqeCpsb2coeCkreC0xKSIsImNvbG9yIjoiIzAwMDAwMCJ9LHsidHlwZSI6MTAwMH1d
            float grad = -(1 - pt) * (2 * pt*logf(pt) + pt - 1);    // http://blog.csdn.net/linmingan/article/details/77885832
            //float grad = (1 - pt) * (2 * pt*logf(pt) + pt - 1);    // https://github.com/unsky/focal-loss

            for (n = 0; n < classes; ++n) {
                delta[index + n] = scale * (((n == class_id) ? 1 : 0) - output[index + n]);

                delta[index + n] *= alpha*grad;

                if (n == class_id) *avg_cat += output[index + n];
            }
        }
        else {
            // default
            for (n = 0; n < classes; ++n) {
                delta[index + n] = scale * (((n == class_id) ? 1 : 0) - output[index + n]);
                if (n == class_id) *avg_cat += output[index + n];
            }
        }
    }
}

/* Inverse of the logistic function. */
float logit(float x)
{
    return log(x/(1.-x));
}

/* NaN test that does not rely on isnan() (NaN != NaN). */
float tisnan(float x)
{
    return (x != x);
}

/* Flattened index of ENTRY (coordinate/objectness/class channel) for the
   anchor at LOCATION in image BATCH, in channel-major layout. */
static int entry_index(layer l, int batch, int location, int entry)
{
    int n = location / (l.w*l.h);
    int loc = location % (l.w*l.h);
    return batch*l.outputs + n*l.w*l.h*(l.coords + l.classes + 1) + entry*l.w*l.h + loc;
}

void softmax_tree(float *input, int batch, int inputs, float temp, tree *hierarchy, float *output);

/* Forward pass: activate raw predictions and, when training, compute the
   loss gradient into l.delta and the scalar cost into *l.cost.
   NOTE(review): the final printf divides by count/class_count, which can be
   zero when a batch contains no ground-truth boxes. */
void forward_region_layer(const region_layer l, network_state state)
{
    int i,j,b,t,n;
    int size = l.coords + l.classes + 1;    /* values per anchor */
    memcpy(l.output, state.input, l.outputs*l.batch*sizeof(float));
#ifndef GPU
    flatten(l.output, l.w*l.h, size*l.n, l.batch, 1);
#endif
    /* Logistic-activate the objectness channel of every anchor. */
    for (b = 0; b < l.batch; ++b){
        for(i = 0; i < l.h*l.w*l.n; ++i){
            int index = size*i + b*l.outputs;
            l.output[index + 4] = logistic_activate(l.output[index + 4]);
        }
    }

#ifndef GPU
    /* Softmax the class scores (hierarchical or flat). */
    if (l.softmax_tree){
        for (b = 0; b < l.batch; ++b){
            for(i = 0; i < l.h*l.w*l.n; ++i){
                int index = size*i + b*l.outputs;
                softmax_tree(l.output + index + 5, 1, 0, 1, l.softmax_tree, l.output + index + 5);
            }
        }
    } else if (l.softmax){
        for (b = 0; b < l.batch; ++b){
            for(i = 0; i < l.h*l.w*l.n; ++i){
                int index = size*i + b*l.outputs;
                softmax(l.output + index + 5, l.classes, 1, l.output + index + 5, 1);
            }
        }
    }
#endif
    if(!state.train) return;
    memset(l.delta, 0, l.outputs * l.batch * sizeof(float));
    float avg_iou = 0;
    float recall = 0;
    float avg_cat = 0;
    float avg_obj = 0;
    float avg_anyobj = 0;
    int count = 0;
    int class_count = 0;
    *(l.cost) = 0;
    for (b = 0; b < l.batch; ++b) {
        if(l.softmax_tree){
            /* Special "classification-only" truths are flagged by huge
               coordinates: train only the best-matching anchor's classes. */
            int onlyclass_id = 0;
            for(t = 0; t < l.max_boxes; ++t){
                box truth = float_to_box(state.truth + t*5 + b*l.truths);
                if(!truth.x) break; // continue;
                int class_id = state.truth[t*5 + b*l.truths + 4];
                float maxp = 0;
                int maxi = 0;
                if(truth.x > 100000 && truth.y > 100000){
                    for(n = 0; n < l.n*l.w*l.h; ++n){
                        int index = size*n + b*l.outputs + 5;
                        float scale = l.output[index-1];
                        float p = scale*get_hierarchy_probability(l.output + index, l.softmax_tree, class_id);
                        if(p > maxp){
                            maxp = p;
                            maxi = n;
                        }
                    }
                    int index = size*maxi + b*l.outputs + 5;
                    delta_region_class(l.output, l.delta, index, class_id, l.classes, l.softmax_tree, l.class_scale, &avg_cat, l.focal_loss);
                    ++class_count;
                    onlyclass_id = 1;
                    break;
                }
            }
            if(onlyclass_id) continue;
        }
        /* No-object loss for every anchor (overwritten later where a truth
           is assigned), plus a weak prior-matching loss early in training. */
        for (j = 0; j < l.h; ++j) {
            for (i = 0; i < l.w; ++i) {
                for (n = 0; n < l.n; ++n) {
                    int index = size*(j*l.w*l.n + i*l.n + n) + b*l.outputs;
                    box pred = get_region_box(l.output, l.biases, n, index, i, j, l.w, l.h);
                    float best_iou = 0;
                    int best_class_id = -1;
                    for(t = 0; t < l.max_boxes; ++t){
                        box truth = float_to_box(state.truth + t*5 + b*l.truths);
                        int class_id = state.truth[t * 5 + b*l.truths + 4];
                        if (class_id >= l.classes) continue; // if label contains class_id more than number of classes in the cfg-file
                        if(!truth.x) break; // continue;
                        float iou = box_iou(pred, truth);
                        if (iou > best_iou) {
                            best_class_id = state.truth[t*5 + b*l.truths + 4];
                            best_iou = iou;
                        }
                    }
                    avg_anyobj += l.output[index + 4];
                    l.delta[index + 4] = l.noobject_scale * ((0 - l.output[index + 4]) * logistic_gradient(l.output[index + 4]));
                    if(l.classfix == -1) l.delta[index + 4] = l.noobject_scale * ((best_iou - l.output[index + 4]) * logistic_gradient(l.output[index + 4]));
                    else{
                        /* Anchors overlapping a truth well get no no-object
                           penalty (and optionally a class gradient). */
                        if (best_iou > l.thresh) {
                            l.delta[index + 4] = 0;
                            if(l.classfix > 0){
                                delta_region_class(l.output, l.delta, index + 5, best_class_id, l.classes, l.softmax_tree, l.class_scale*(l.classfix == 2 ? l.output[index + 4] : 1), &avg_cat, l.focal_loss);
                                ++class_count;
                            }
                        }
                    }

                    if(*(state.net.seen) < 12800){
                        /* Early in training, pull every anchor toward its
                           own prior box (centered in its cell). */
                        box truth = {0};
                        truth.x = (i + .5)/l.w;
                        truth.y = (j + .5)/l.h;
                        truth.w = l.biases[2*n];
                        truth.h = l.biases[2*n+1];
                        if(DOABS){
                            truth.w = l.biases[2*n]/l.w;
                            truth.h = l.biases[2*n+1]/l.h;
                        }
                        delta_region_box(truth, l.output, l.biases, n, index, i, j, l.w, l.h, l.delta, .01);
                    }
                }
            }
        }
        /* Assign each ground truth to the anchor (in its cell) whose prior
           shape matches best, and write box/objectness/class gradients. */
        for(t = 0; t < l.max_boxes; ++t){
            box truth = float_to_box(state.truth + t*5 + b*l.truths);
            int class_id = state.truth[t * 5 + b*l.truths + 4];
            if (class_id >= l.classes) {
                printf(" Warning: in txt-labels class_id=%d >= classes=%d in cfg-file. In txt-labels class_id should be [from 0 to %d] \n", class_id, l.classes, l.classes-1);
                getchar();
                continue; // if label contains class_id more than number of classes in the cfg-file
            }
            if(!truth.x) break; // continue;
            float best_iou = 0;
            int best_index = 0;
            int best_n = 0;
            i = (truth.x * l.w);
            j = (truth.y * l.h);
            //printf("%d %f %d %f\n", i, truth.x*l.w, j, truth.y*l.h);
            box truth_shift = truth;
            /* Compare shapes only: shift both boxes to the origin. */
            truth_shift.x = 0;
            truth_shift.y = 0;
            //printf("index %d %d\n",i, j);
            for(n = 0; n < l.n; ++n){
                int index = size*(j*l.w*l.n + i*l.n + n) + b*l.outputs;
                box pred = get_region_box(l.output, l.biases, n, index, i, j, l.w, l.h);
                if(l.bias_match){
                    pred.w = l.biases[2*n];
                    pred.h = l.biases[2*n+1];
                    if(DOABS){
                        pred.w = l.biases[2*n]/l.w;
                        pred.h = l.biases[2*n+1]/l.h;
                    }
                }
                //printf("pred: (%f, %f) %f x %f\n", pred.x, pred.y, pred.w, pred.h);
                pred.x = 0;
                pred.y = 0;
                float iou = box_iou(pred, truth_shift);
                if (iou > best_iou){
                    best_index = index;
                    best_iou = iou;
                    best_n = n;
                }
            }
            //printf("%d %f (%f, %f) %f x %f\n", best_n, best_iou, truth.x, truth.y, truth.w, truth.h);

            float iou = delta_region_box(truth, l.output, l.biases, best_n, best_index, i, j, l.w, l.h, l.delta, l.coord_scale);
            if(iou > .5) recall += 1;
            avg_iou += iou;

            //l.delta[best_index + 4] = iou - l.output[best_index + 4];
            avg_obj += l.output[best_index + 4];
            l.delta[best_index + 4] = l.object_scale * (1 - l.output[best_index + 4]) * logistic_gradient(l.output[best_index + 4]);
            if (l.rescore) {
                /* Rescore: target objectness is the achieved IOU, not 1. */
                l.delta[best_index + 4] = l.object_scale * (iou - l.output[best_index + 4]) * logistic_gradient(l.output[best_index + 4]);
            }

            if (l.map) class_id = l.map[class_id];
            delta_region_class(l.output, l.delta, best_index + 5, class_id, l.classes, l.softmax_tree, l.class_scale, &avg_cat, l.focal_loss);
            ++count;
            ++class_count;
        }
    }
    //printf("\n");
#ifndef GPU
    flatten(l.delta, l.w*l.h, size*l.n, l.batch, 0);
#endif
    /* Cost is the squared magnitude of the gradient vector. */
    *(l.cost) = pow(mag_array(l.delta, l.outputs * l.batch), 2);
    printf("Region Avg IOU: %f, Class: %f, Obj: %f, No Obj: %f, Avg Recall: %f,  count: %d\n", avg_iou/count, avg_cat/class_count, avg_obj/count, avg_anyobj/(l.w*l.h*l.n*l.batch), recall/count, count);
}

/* Backward pass: accumulate this layer's gradient into the network delta. */
void backward_region_layer(const region_layer l, network_state state)
{
    axpy_cpu(l.batch*l.inputs, 1, l.delta, 1, state.delta, 1);
}

/* Extract detection boxes and per-class probabilities (thresholded by
   THRESH) from the activated layer output, scaled to an image of size WxH.
   NOTE(review): with a softmax tree and MAP, exactly 200 mapped classes are
   assumed — confirm against the caller. */
void get_region_boxes(layer l, int w, int h, float thresh, float **probs, box *boxes, int only_objectness, int *map)
{
    int i;
    float *const predictions = l.output;
    #pragma omp parallel for
    for (i = 0; i < l.w*l.h; ++i){
        int j, n;
        int row = i / l.w;
        int col = i % l.w;
        for(n = 0; n < l.n; ++n){
            int index = i*l.n + n;
            int p_index = index * (l.classes + 5) + 4;
            float scale = predictions[p_index];
            if(l.classfix == -1 && scale < .5) scale = 0;
            int box_index = index * (l.classes + 5);
            boxes[index] = get_region_box(predictions, l.biases, n, box_index, col, row, l.w, l.h);
            boxes[index].x *= w;
            boxes[index].y *= h;
            boxes[index].w *= w;
            boxes[index].h *= h;

            int class_index = index * (l.classes + 5) + 5;
            if(l.softmax_tree){
                hierarchy_predictions(predictions + class_index, l.classes, l.softmax_tree, 0);
                int found = 0;
                if(map){
                    for(j = 0; j < 200; ++j){
                        float prob = scale*predictions[class_index+map[j]];
                        probs[index][j] = (prob > thresh) ? prob : 0;
                    }
                } else {
                    /* Keep only the deepest node above 0.5 in the tree. */
                    for(j = l.classes - 1; j >= 0; --j){
                        if(!found && predictions[class_index + j] > .5){
                            found = 1;
                        } else {
                            predictions[class_index + j] = 0;
                        }
                        float prob = predictions[class_index+j];
                        probs[index][j] = (scale > thresh) ? prob : 0;
                    }
                }
            } else {
                for(j = 0; j < l.classes; ++j){
                    float prob = scale*predictions[class_index+j];
                    probs[index][j] = (prob > thresh) ? prob : 0;
                }
            }
            if(only_objectness){
                probs[index][0] = scale;
            }
        }
    }
}

#ifdef GPU

/* GPU forward: flatten and softmax on the device, then pull the result to
   the host and reuse the CPU forward pass for the loss computation. */
void forward_region_layer_gpu(const region_layer l, network_state state)
{
    /*
       if(!state.train){
       copy_ongpu(l.batch*l.inputs, state.input, 1, l.output_gpu, 1);
       return;
       }
     */
    flatten_ongpu(state.input, l.h*l.w, l.n*(l.coords + l.classes + 1), l.batch, 1, l.output_gpu);
    if(l.softmax_tree){
        int i;
        int count = 5;
        for (i = 0; i < l.softmax_tree->groups; ++i) {
            int group_size = l.softmax_tree->group_size[i];
            softmax_gpu(l.output_gpu+count, group_size, l.classes + 5, l.w*l.h*l.n*l.batch, 1, l.output_gpu + count);
            count += group_size;
        }
    }else if (l.softmax){
        softmax_gpu(l.output_gpu+5, l.classes, l.classes + 5, l.w*l.h*l.n*l.batch, 1, l.output_gpu + 5);
    }

    float *in_cpu = calloc(l.batch*l.inputs, sizeof(float));
    float *truth_cpu = 0;
    if(state.truth){
        int num_truth = l.batch*l.truths;
        truth_cpu = calloc(num_truth, sizeof(float));
        cuda_pull_array(state.truth, truth_cpu, num_truth);
    }
    cuda_pull_array(l.output_gpu, in_cpu, l.batch*l.inputs);
    //cudaStreamSynchronize(get_cuda_stream());
    network_state cpu_state = state;
    cpu_state.train = state.train;
    cpu_state.truth = truth_cpu;
    cpu_state.input = in_cpu;
    forward_region_layer(l, cpu_state);
    //cuda_push_array(l.output_gpu, l.output, l.batch*l.outputs);
    free(cpu_state.input);
    if(!state.train) return;
    cuda_push_array(l.delta_gpu, l.delta, l.batch*l.outputs);
    //cudaStreamSynchronize(get_cuda_stream());
    if(cpu_state.truth) free(cpu_state.truth);
}

/* GPU backward: un-flatten the delta back into the network's device delta. */
void backward_region_layer_gpu(region_layer l, network_state state)
{
    flatten_ongpu(l.delta_gpu, l.h*l.w, l.n*(l.coords + l.classes + 1), l.batch, 0, state.delta);
}
#endif

/* Undo letterbox scaling: map boxes from network-input coordinates (NETW x
   NETH with aspect-preserving padding) back to the original W x H image;
   if RELATIVE is false, convert to absolute pixel coordinates. */
void correct_region_boxes(detection *dets, int n, int w, int h, int netw, int neth, int relative)
{
    int i;
    int new_w = 0;
    int new_h = 0;
    if (((float)netw / w) < ((float)neth / h)) {
        new_w = netw;
        new_h = (h * netw) / w;
    }
    else {
        new_h = neth;
        new_w = (w * neth) / h;
    }
    for (i = 0; i < n; ++i) {
        box b = dets[i].bbox;
        b.x = (b.x - (netw - new_w) / 2. / netw) / ((float)new_w / netw);
        b.y = (b.y - (neth - new_h) / 2. / neth) / ((float)new_h / neth);
        b.w *= (float)netw / new_w;
        b.h *= (float)neth / new_h;
        if (!relative) {
            b.x *= w;
            b.w *= w;
            b.y *= h;
            b.h *= h;
        }
        dets[i].bbox = b;
    }
}

/* Fill DETS with decoded detections (boxes, objectness, class probs) from
   the layer output; l.batch == 2 is treated as an original+flipped pair and
   the two outputs are averaged before decoding. */
void get_region_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, float tree_thresh, int relative, detection *dets)
{
    int i, j, n, z;
    float *predictions = l.output;
    if (l.batch == 2) {
        /* Average the prediction with its horizontally-flipped twin. */
        float *flip = l.output + l.outputs;
        for (j = 0; j < l.h; ++j) {
            for (i = 0; i < l.w / 2; ++i) {
                for (n = 0; n < l.n; ++n) {
                    for (z = 0; z < l.classes + l.coords + 1; ++z) {
                        int i1 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + i;
                        int i2 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + (l.w - i - 1);
                        float swap = flip[i1];
                        flip[i1] = flip[i2];
                        flip[i2] = swap;
                        if (z == 0) {
                            /* x-offset channel changes sign when mirrored */
                            flip[i1] = -flip[i1];
                            flip[i2] = -flip[i2];
                        }
                    }
                }
            }
        }
        for (i = 0; i < l.outputs; ++i) {
            l.output[i] = (l.output[i] + flip[i]) / 2.;
        }
    }
    for (i = 0; i < l.w*l.h; ++i) {
        int row = i / l.w;
        int col = i % l.w;
        for (n = 0; n < l.n; ++n) {
            int index = n*l.w*l.h + i;
            for (j = 0; j < l.classes; ++j) {
                dets[index].prob[j] = 0;
            }
            int obj_index = entry_index(l, 0, n*l.w*l.h + i, l.coords);
            int box_index = entry_index(l, 0, n*l.w*l.h + i, 0);
            int mask_index = entry_index(l, 0, n*l.w*l.h + i, 4);
            float scale = l.background ? 1 : predictions[obj_index];
            dets[index].bbox = get_region_box(predictions, l.biases, n, box_index, col, row, l.w, l.h);// , l.w*l.h);
            dets[index].objectness = scale > thresh ? scale : 0;
            if (dets[index].mask) {
                for (j = 0; j < l.coords - 4; ++j) {
                    dets[index].mask[j] = l.output[mask_index + j*l.w*l.h];
                }
            }

            int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + !l.background);
            if (l.softmax_tree) {
                hierarchy_predictions(predictions + class_index, l.classes, l.softmax_tree, 0);// , l.w*l.h);
                if (map) {
                    for (j = 0; j < 200; ++j) {
                        int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + 1 + map[j]);
                        float prob = scale*predictions[class_index];
                        dets[index].prob[j] = (prob > thresh) ? prob : 0;
                    }
                }
                else {
                    int j = hierarchy_top_prediction(predictions + class_index, l.softmax_tree, tree_thresh, l.w*l.h);
                    dets[index].prob[j] = (scale > thresh) ? scale : 0;
                }
            }
            else {
                if (dets[index].objectness) {
                    for (j = 0; j < l.classes; ++j) {
                        int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + 1 + j);
                        float prob = scale*predictions[class_index];
                        dets[index].prob[j] = (prob > thresh) ? prob : 0;
                    }
                }
            }
        }
    }
    correct_region_boxes(dets, l.w*l.h*l.n, w, h, netw, neth, relative);
}

/* Zero the objectness channel of every anchor in the layer output. */
void zero_objectness(layer l)
{
    int i, n;
    for (i = 0; i < l.w*l.h; ++i) {
        for (n = 0; n < l.n; ++n) {
            int obj_index = entry_index(l, 0, n*l.w*l.h + i, l.coords);
            l.output[obj_index] = 0;
        }
    }
}
target.c
/* Copyright (C) 2013-2015 Free Software Foundation, Inc. Contributed by Jakub Jelinek <jakub@redhat.com>. This file is part of the GNU Offloading and Multi Processing Library (libgomp). Libgomp is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ /* This file contains the support of offloading. */ #include "config.h" #include "libgomp.h" #include "oacc-plugin.h" #include "oacc-int.h" #include "gomp-constants.h" #include <limits.h> #include <stdbool.h> #include <stdlib.h> #ifdef HAVE_INTTYPES_H # include <inttypes.h> /* For PRIu64. */ #endif #include <string.h> #include <assert.h> #ifdef PLUGIN_SUPPORT #include <dlfcn.h> #include "plugin-suffix.h" #endif static void gomp_target_init (void); /* The whole initialization code for offloading plugins is only run one. */ static pthread_once_t gomp_is_initialized = PTHREAD_ONCE_INIT; /* Mutex for offload image registration. */ static gomp_mutex_t register_lock; /* This structure describes an offload image. It contains type of the target device, pointer to host table descriptor, and pointer to target data. 
*/ struct offload_image_descr { enum offload_target_type type; void *host_table; void *target_data; }; /* Array of descriptors of offload images. */ static struct offload_image_descr *offload_images; /* Total number of offload images. */ static int num_offload_images; /* Array of descriptors for all available devices. */ static struct gomp_device_descr *devices; /* Total number of available devices. */ static int num_devices; /* Number of GOMP_OFFLOAD_CAP_OPENMP_400 devices. */ static int num_devices_openmp; /* Similar to gomp_realloc, but release register_lock before gomp_fatal. */ static void * gomp_realloc_unlock (void *old, size_t size) { void *ret = realloc (old, size); if (ret == NULL) { gomp_mutex_unlock (&register_lock); gomp_fatal ("Out of memory allocating %lu bytes", (unsigned long) size); } return ret; } /* The comparison function. */ attribute_hidden int splay_compare (splay_tree_key x, splay_tree_key y) { if (x->host_start == x->host_end && y->host_start == y->host_end) return 0; if (x->host_end <= y->host_start) return -1; if (x->host_start >= y->host_end) return 1; return 0; } #include "splay-tree.h" attribute_hidden void gomp_init_targets_once (void) { (void) pthread_once (&gomp_is_initialized, gomp_target_init); } attribute_hidden int gomp_get_num_devices (void) { gomp_init_targets_once (); return num_devices_openmp; } static struct gomp_device_descr * resolve_device (int device_id) { if (device_id == GOMP_DEVICE_ICV) { struct gomp_task_icv *icv = gomp_icv (false); device_id = icv->default_device_var; } if (device_id < 0 || device_id >= gomp_get_num_devices ()) return NULL; return &devices[device_id]; } /* Handle the case where splay_tree_lookup found oldn for newn. Helper function of gomp_map_vars. 
*/ static inline void gomp_map_vars_existing (struct gomp_device_descr *devicep, splay_tree_key oldn, splay_tree_key newn, unsigned char kind) { if ((kind & GOMP_MAP_FLAG_FORCE) || oldn->host_start > newn->host_start || oldn->host_end < newn->host_end) { gomp_mutex_unlock (&devicep->lock); gomp_fatal ("Trying to map into device [%p..%p) object when " "[%p..%p) is already mapped", (void *) newn->host_start, (void *) newn->host_end, (void *) oldn->host_start, (void *) oldn->host_end); } oldn->refcount++; } static int get_kind (bool is_openacc, void *kinds, int idx) { return is_openacc ? ((unsigned short *) kinds)[idx] : ((unsigned char *) kinds)[idx]; } attribute_hidden struct target_mem_desc * gomp_map_vars (struct gomp_device_descr *devicep, size_t mapnum, void **hostaddrs, void **devaddrs, size_t *sizes, void *kinds, bool is_openacc, bool is_target) { size_t i, tgt_align, tgt_size, not_found_cnt = 0; const int rshift = is_openacc ? 8 : 3; const int typemask = is_openacc ? 0xff : 0x7; struct splay_tree_s *mem_map = &devicep->mem_map; struct splay_tree_key_s cur_node; struct target_mem_desc *tgt = gomp_malloc (sizeof (*tgt) + sizeof (tgt->list[0]) * mapnum); tgt->list_count = mapnum; tgt->refcount = 1; tgt->device_descr = devicep; if (mapnum == 0) return tgt; tgt_align = sizeof (void *); tgt_size = 0; if (is_target) { size_t align = 4 * sizeof (void *); tgt_align = align; tgt_size = mapnum * sizeof (void *); } gomp_mutex_lock (&devicep->lock); for (i = 0; i < mapnum; i++) { int kind = get_kind (is_openacc, kinds, i); if (hostaddrs[i] == NULL) { tgt->list[i] = NULL; continue; } cur_node.host_start = (uintptr_t) hostaddrs[i]; if (!GOMP_MAP_POINTER_P (kind & typemask)) cur_node.host_end = cur_node.host_start + sizes[i]; else cur_node.host_end = cur_node.host_start + sizeof (void *); splay_tree_key n = splay_tree_lookup (mem_map, &cur_node); if (n) { tgt->list[i] = n; gomp_map_vars_existing (devicep, n, &cur_node, kind & typemask); } else { tgt->list[i] = NULL; size_t 
align = (size_t) 1 << (kind >> rshift); not_found_cnt++; if (tgt_align < align) tgt_align = align; tgt_size = (tgt_size + align - 1) & ~(align - 1); tgt_size += cur_node.host_end - cur_node.host_start; if ((kind & typemask) == GOMP_MAP_TO_PSET) { size_t j; for (j = i + 1; j < mapnum; j++) if (!GOMP_MAP_POINTER_P (get_kind (is_openacc, kinds, j) & typemask)) break; else if ((uintptr_t) hostaddrs[j] < cur_node.host_start || ((uintptr_t) hostaddrs[j] + sizeof (void *) > cur_node.host_end)) break; else { tgt->list[j] = NULL; i++; } } } } if (devaddrs) { if (mapnum != 1) { gomp_mutex_unlock (&devicep->lock); gomp_fatal ("unexpected aggregation"); } tgt->to_free = devaddrs[0]; tgt->tgt_start = (uintptr_t) tgt->to_free; tgt->tgt_end = tgt->tgt_start + sizes[0]; } else if (not_found_cnt || is_target) { /* Allocate tgt_align aligned tgt_size block of memory. */ /* FIXME: Perhaps change interface to allocate properly aligned memory. */ tgt->to_free = devicep->alloc_func (devicep->target_id, tgt_size + tgt_align - 1); tgt->tgt_start = (uintptr_t) tgt->to_free; tgt->tgt_start = (tgt->tgt_start + tgt_align - 1) & ~(tgt_align - 1); tgt->tgt_end = tgt->tgt_start + tgt_size; } else { tgt->to_free = NULL; tgt->tgt_start = 0; tgt->tgt_end = 0; } tgt_size = 0; if (is_target) tgt_size = mapnum * sizeof (void *); tgt->array = NULL; if (not_found_cnt) { tgt->array = gomp_malloc (not_found_cnt * sizeof (*tgt->array)); splay_tree_node array = tgt->array; size_t j; for (i = 0; i < mapnum; i++) if (tgt->list[i] == NULL) { int kind = get_kind (is_openacc, kinds, i); if (hostaddrs[i] == NULL) continue; splay_tree_key k = &array->key; k->host_start = (uintptr_t) hostaddrs[i]; if (!GOMP_MAP_POINTER_P (kind & typemask)) k->host_end = k->host_start + sizes[i]; else k->host_end = k->host_start + sizeof (void *); splay_tree_key n = splay_tree_lookup (mem_map, k); if (n) { tgt->list[i] = n; gomp_map_vars_existing (devicep, n, k, kind & typemask); } else { size_t align = (size_t) 1 << (kind >> 
rshift); tgt->list[i] = k; tgt_size = (tgt_size + align - 1) & ~(align - 1); k->tgt = tgt; k->tgt_offset = tgt_size; tgt_size += k->host_end - k->host_start; k->copy_from = GOMP_MAP_COPY_FROM_P (kind & typemask); k->refcount = 1; k->async_refcount = 0; tgt->refcount++; array->left = NULL; array->right = NULL; splay_tree_insert (mem_map, array); switch (kind & typemask) { case GOMP_MAP_ALLOC: case GOMP_MAP_FROM: case GOMP_MAP_FORCE_ALLOC: case GOMP_MAP_FORCE_FROM: break; case GOMP_MAP_TO: case GOMP_MAP_TOFROM: case GOMP_MAP_FORCE_TO: case GOMP_MAP_FORCE_TOFROM: /* FIXME: Perhaps add some smarts, like if copying several adjacent fields from host to target, use some host buffer to avoid sending each var individually. */ devicep->host2dev_func (devicep->target_id, (void *) (tgt->tgt_start + k->tgt_offset), (void *) k->host_start, k->host_end - k->host_start); break; case GOMP_MAP_POINTER: cur_node.host_start = (uintptr_t) *(void **) k->host_start; if (cur_node.host_start == (uintptr_t) NULL) { cur_node.tgt_offset = (uintptr_t) NULL; /* FIXME: see above FIXME comment. */ devicep->host2dev_func (devicep->target_id, (void *) (tgt->tgt_start + k->tgt_offset), (void *) &cur_node.tgt_offset, sizeof (void *)); break; } /* Add bias to the pointer value. */ cur_node.host_start += sizes[i]; cur_node.host_end = cur_node.host_start + 1; n = splay_tree_lookup (mem_map, &cur_node); if (n == NULL) { /* Could be possibly zero size array section. */ cur_node.host_end--; n = splay_tree_lookup (mem_map, &cur_node); if (n == NULL) { cur_node.host_start--; n = splay_tree_lookup (mem_map, &cur_node); cur_node.host_start++; } } if (n == NULL) { gomp_mutex_unlock (&devicep->lock); gomp_fatal ("Pointer target of array section " "wasn't mapped"); } cur_node.host_start -= n->host_start; cur_node.tgt_offset = n->tgt->tgt_start + n->tgt_offset + cur_node.host_start; /* At this point tgt_offset is target address of the array section. 
Now subtract bias to get what we want to initialize the pointer with. */ cur_node.tgt_offset -= sizes[i]; /* FIXME: see above FIXME comment. */ devicep->host2dev_func (devicep->target_id, (void *) (tgt->tgt_start + k->tgt_offset), (void *) &cur_node.tgt_offset, sizeof (void *)); break; case GOMP_MAP_TO_PSET: /* FIXME: see above FIXME comment. */ devicep->host2dev_func (devicep->target_id, (void *) (tgt->tgt_start + k->tgt_offset), (void *) k->host_start, k->host_end - k->host_start); for (j = i + 1; j < mapnum; j++) if (!GOMP_MAP_POINTER_P (get_kind (is_openacc, kinds, j) & typemask)) break; else if ((uintptr_t) hostaddrs[j] < k->host_start || ((uintptr_t) hostaddrs[j] + sizeof (void *) > k->host_end)) break; else { tgt->list[j] = k; k->refcount++; cur_node.host_start = (uintptr_t) *(void **) hostaddrs[j]; if (cur_node.host_start == (uintptr_t) NULL) { cur_node.tgt_offset = (uintptr_t) NULL; /* FIXME: see above FIXME comment. */ devicep->host2dev_func (devicep->target_id, (void *) (tgt->tgt_start + k->tgt_offset + ((uintptr_t) hostaddrs[j] - k->host_start)), (void *) &cur_node.tgt_offset, sizeof (void *)); i++; continue; } /* Add bias to the pointer value. */ cur_node.host_start += sizes[j]; cur_node.host_end = cur_node.host_start + 1; n = splay_tree_lookup (mem_map, &cur_node); if (n == NULL) { /* Could be possibly zero size array section. */ cur_node.host_end--; n = splay_tree_lookup (mem_map, &cur_node); if (n == NULL) { cur_node.host_start--; n = splay_tree_lookup (mem_map, &cur_node); cur_node.host_start++; } } if (n == NULL) { gomp_mutex_unlock (&devicep->lock); gomp_fatal ("Pointer target of array section " "wasn't mapped"); } cur_node.host_start -= n->host_start; cur_node.tgt_offset = n->tgt->tgt_start + n->tgt_offset + cur_node.host_start; /* At this point tgt_offset is target address of the array section. Now subtract bias to get what we want to initialize the pointer with. */ cur_node.tgt_offset -= sizes[j]; /* FIXME: see above FIXME comment. 
*/ devicep->host2dev_func (devicep->target_id, (void *) (tgt->tgt_start + k->tgt_offset + ((uintptr_t) hostaddrs[j] - k->host_start)), (void *) &cur_node.tgt_offset, sizeof (void *)); i++; } break; case GOMP_MAP_FORCE_PRESENT: { /* We already looked up the memory region above and it was missing. */ size_t size = k->host_end - k->host_start; gomp_mutex_unlock (&devicep->lock); #ifdef HAVE_INTTYPES_H gomp_fatal ("present clause: !acc_is_present (%p, " "%"PRIu64" (0x%"PRIx64"))", (void *) k->host_start, (uint64_t) size, (uint64_t) size); #else gomp_fatal ("present clause: !acc_is_present (%p, " "%lu (0x%lx))", (void *) k->host_start, (unsigned long) size, (unsigned long) size); #endif } break; case GOMP_MAP_FORCE_DEVICEPTR: assert (k->host_end - k->host_start == sizeof (void *)); devicep->host2dev_func (devicep->target_id, (void *) (tgt->tgt_start + k->tgt_offset), (void *) k->host_start, sizeof (void *)); break; default: gomp_mutex_unlock (&devicep->lock); gomp_fatal ("%s: unhandled kind 0x%.2x", __FUNCTION__, kind); } array++; } } } if (is_target) { for (i = 0; i < mapnum; i++) { if (tgt->list[i] == NULL) cur_node.tgt_offset = (uintptr_t) NULL; else cur_node.tgt_offset = tgt->list[i]->tgt->tgt_start + tgt->list[i]->tgt_offset; /* FIXME: see above FIXME comment. */ devicep->host2dev_func (devicep->target_id, (void *) (tgt->tgt_start + i * sizeof (void *)), (void *) &cur_node.tgt_offset, sizeof (void *)); } } gomp_mutex_unlock (&devicep->lock); return tgt; } static void gomp_unmap_tgt (struct target_mem_desc *tgt) { /* Deallocate on target the tgt->tgt_start .. tgt->tgt_end region. */ if (tgt->tgt_end) tgt->device_descr->free_func (tgt->device_descr->target_id, tgt->to_free); free (tgt->array); free (tgt); } /* Decrease the refcount for a set of mapped variables, and queue asychronous copies from the device back to the host after any work that has been issued. 
Because the regions are still "live", increment an asynchronous reference count to indicate that they should not be unmapped from host-side data structures until the asynchronous copy has completed. */ attribute_hidden void gomp_copy_from_async (struct target_mem_desc *tgt) { struct gomp_device_descr *devicep = tgt->device_descr; size_t i; gomp_mutex_lock (&devicep->lock); for (i = 0; i < tgt->list_count; i++) if (tgt->list[i] == NULL) ; else if (tgt->list[i]->refcount > 1) { tgt->list[i]->refcount--; tgt->list[i]->async_refcount++; } else { splay_tree_key k = tgt->list[i]; if (k->copy_from) devicep->dev2host_func (devicep->target_id, (void *) k->host_start, (void *) (k->tgt->tgt_start + k->tgt_offset), k->host_end - k->host_start); } gomp_mutex_unlock (&devicep->lock); } /* Unmap variables described by TGT. If DO_COPYFROM is true, copy relevant variables back from device to host: if it is false, it is assumed that this has been done already, i.e. by gomp_copy_from_async above. */ attribute_hidden void gomp_unmap_vars (struct target_mem_desc *tgt, bool do_copyfrom) { struct gomp_device_descr *devicep = tgt->device_descr; if (tgt->list_count == 0) { free (tgt); return; } gomp_mutex_lock (&devicep->lock); size_t i; for (i = 0; i < tgt->list_count; i++) if (tgt->list[i] == NULL) ; else if (tgt->list[i]->refcount > 1) tgt->list[i]->refcount--; else if (tgt->list[i]->async_refcount > 0) tgt->list[i]->async_refcount--; else { splay_tree_key k = tgt->list[i]; if (k->copy_from && do_copyfrom) devicep->dev2host_func (devicep->target_id, (void *) k->host_start, (void *) (k->tgt->tgt_start + k->tgt_offset), k->host_end - k->host_start); splay_tree_remove (&devicep->mem_map, k); if (k->tgt->refcount > 1) k->tgt->refcount--; else gomp_unmap_tgt (k->tgt); } if (tgt->refcount > 1) tgt->refcount--; else gomp_unmap_tgt (tgt); gomp_mutex_unlock (&devicep->lock); } static void gomp_update (struct gomp_device_descr *devicep, size_t mapnum, void **hostaddrs, size_t *sizes, void *kinds, 
	     bool is_openacc)
{
  size_t i;
  struct splay_tree_key_s cur_node;
  /* OpenACC keeps the full 8-bit kind; OpenMP uses only the low 3 bits.  */
  const int typemask = is_openacc ? 0xff : 0x7;

  if (!devicep)
    return;

  if (mapnum == 0)
    return;

  gomp_mutex_lock (&devicep->lock);
  for (i = 0; i < mapnum; i++)
    /* Zero-sized updates are no-ops.  */
    if (sizes[i])
      {
	cur_node.host_start = (uintptr_t) hostaddrs[i];
	cur_node.host_end = cur_node.host_start + sizes[i];
	splay_tree_key n = splay_tree_lookup (&devicep->mem_map, &cur_node);
	if (n)
	  {
	    int kind = get_kind (is_openacc, kinds, i);
	    /* The requested range must be fully inside the mapped one.  */
	    if (n->host_start > cur_node.host_start
		|| n->host_end < cur_node.host_end)
	      {
		gomp_mutex_unlock (&devicep->lock);
		gomp_fatal ("Trying to update [%p..%p) object when "
			    "only [%p..%p) is mapped",
			    (void *) cur_node.host_start,
			    (void *) cur_node.host_end,
			    (void *) n->host_start,
			    (void *) n->host_end);
	      }
	    if (GOMP_MAP_COPY_TO_P (kind & typemask))
	      devicep->host2dev_func (devicep->target_id,
				      (void *) (n->tgt->tgt_start
						+ n->tgt_offset
						+ cur_node.host_start
						- n->host_start),
				      (void *) cur_node.host_start,
				      cur_node.host_end - cur_node.host_start);
	    if (GOMP_MAP_COPY_FROM_P (kind & typemask))
	      devicep->dev2host_func (devicep->target_id,
				      (void *) cur_node.host_start,
				      (void *) (n->tgt->tgt_start
						+ n->tgt_offset
						+ cur_node.host_start
						- n->host_start),
				      cur_node.host_end - cur_node.host_start);
	  }
	else
	  {
	    gomp_mutex_unlock (&devicep->lock);
	    gomp_fatal ("Trying to update [%p..%p) object that is not mapped",
			(void *) cur_node.host_start,
			(void *) cur_node.host_end);
	  }
      }
  gomp_mutex_unlock (&devicep->lock);
}

/* Load image pointed by TARGET_DATA to the device, specified by DEVICEP.
   And insert to splay tree the mapping between addresses from HOST_TABLE and
   from loaded target image.
*/

static void
gomp_offload_image_to_device (struct gomp_device_descr *devicep,
			      void *host_table, void *target_data,
			      bool is_register_lock)
{
  /* HOST_TABLE is an array of four pointers delimiting the host-side
     function table and variable table.  */
  void **host_func_table = ((void ***) host_table)[0];
  void **host_funcs_end = ((void ***) host_table)[1];
  void **host_var_table = ((void ***) host_table)[2];
  void **host_vars_end = ((void ***) host_table)[3];

  /* The func table contains only addresses, the var table contains addresses
     and corresponding sizes.  */
  int num_funcs = host_funcs_end - host_func_table;
  int num_vars = (host_vars_end - host_var_table) / 2;

  /* Load image to device and get target addresses for the image.  */
  struct addr_pair *target_table = NULL;
  int i, num_target_entries
    = devicep->load_image_func (devicep->target_id, target_data,
				&target_table);

  if (num_target_entries != num_funcs + num_vars)
    {
      /* Caller holds DEVICEP->lock (and possibly register_lock); release
	 before the fatal error.  */
      gomp_mutex_unlock (&devicep->lock);
      if (is_register_lock)
	gomp_mutex_unlock (&register_lock);
      gomp_fatal ("Can't map target functions or variables");
    }

  /* Insert host-target address mapping into splay tree.
*/
  struct target_mem_desc *tgt = gomp_malloc (sizeof (*tgt));
  tgt->array = gomp_malloc ((num_funcs + num_vars) * sizeof (*tgt->array));
  tgt->refcount = 1;
  tgt->tgt_start = 0;
  tgt->tgt_end = 0;
  tgt->to_free = NULL;
  tgt->prev = NULL;
  tgt->list_count = 0;
  tgt->device_descr = devicep;
  splay_tree_node array = tgt->array;

  for (i = 0; i < num_funcs; i++)
    {
      splay_tree_key k = &array->key;
      /* Each function is recorded as a 1-byte range at its host address.  */
      k->host_start = (uintptr_t) host_func_table[i];
      k->host_end = k->host_start + 1;
      k->tgt = tgt;
      k->tgt_offset = target_table[i].start;
      k->refcount = 1;
      k->async_refcount = 0;
      k->copy_from = false;
      array->left = NULL;
      array->right = NULL;
      splay_tree_insert (&devicep->mem_map, array);
      array++;
    }

  for (i = 0; i < num_vars; i++)
    {
      /* Variable entries follow the function entries in TARGET_TABLE.  */
      struct addr_pair *target_var = &target_table[num_funcs + i];
      if (target_var->end - target_var->start
	  != (uintptr_t) host_var_table[i * 2 + 1])
	{
	  gomp_mutex_unlock (&devicep->lock);
	  if (is_register_lock)
	    gomp_mutex_unlock (&register_lock);
	  gomp_fatal ("Can't map target variables (size mismatch)");
	}

      splay_tree_key k = &array->key;
      k->host_start = (uintptr_t) host_var_table[i * 2];
      k->host_end = k->host_start + (uintptr_t) host_var_table[i * 2 + 1];
      k->tgt = tgt;
      k->tgt_offset = target_var->start;
      k->refcount = 1;
      k->async_refcount = 0;
      k->copy_from = false;
      array->left = NULL;
      array->right = NULL;
      splay_tree_insert (&devicep->mem_map, array);
      array++;
    }

  free (target_table);
}

/* This function should be called from every offload image while loading.
   It gets the descriptor of the host func and var tables HOST_TABLE, TYPE of
   the target, and TARGET_DATA needed by target plugin.  */

void
GOMP_offload_register (void *host_table, enum offload_target_type target_type,
		       void *target_data)
{
  int i;
  gomp_mutex_lock (&register_lock);

  /* Load image to all initialized devices.
*/
  for (i = 0; i < num_devices; i++)
    {
      struct gomp_device_descr *devicep = &devices[i];
      gomp_mutex_lock (&devicep->lock);
      if (devicep->type == target_type && devicep->is_initialized)
	gomp_offload_image_to_device (devicep, host_table, target_data, true);
      gomp_mutex_unlock (&devicep->lock);
    }

  /* Insert image to array of pending images.  */
  offload_images
    = gomp_realloc_unlock (offload_images,
			   (num_offload_images + 1)
			   * sizeof (struct offload_image_descr));
  offload_images[num_offload_images].type = target_type;
  offload_images[num_offload_images].host_table = host_table;
  offload_images[num_offload_images].target_data = target_data;

  num_offload_images++;
  gomp_mutex_unlock (&register_lock);
}

/* This function should be called from every offload image while unloading.
   It gets the descriptor of the host func and var tables HOST_TABLE, TYPE of
   the target, and TARGET_DATA needed by target plugin.  */

void
GOMP_offload_unregister (void *host_table,
			 enum offload_target_type target_type,
			 void *target_data)
{
  void **host_func_table = ((void ***) host_table)[0];
  void **host_funcs_end = ((void ***) host_table)[1];
  void **host_var_table = ((void ***) host_table)[2];
  void **host_vars_end = ((void ***) host_table)[3];
  int i;

  /* The func table contains only addresses, the var table contains addresses
     and corresponding sizes.  */
  int num_funcs = host_funcs_end - host_func_table;
  int num_vars = (host_vars_end - host_var_table) / 2;

  gomp_mutex_lock (&register_lock);

  /* Unload image from all initialized devices.  */
  for (i = 0; i < num_devices; i++)
    {
      int j;
      struct gomp_device_descr *devicep = &devices[i];
      gomp_mutex_lock (&devicep->lock);
      if (devicep->type != target_type || !devicep->is_initialized)
	{
	  gomp_mutex_unlock (&devicep->lock);
	  continue;
	}

      devicep->unload_image_func (devicep->target_id, target_data);

      /* Remove mapping from splay tree.
*/
      struct splay_tree_key_s k;
      splay_tree_key node = NULL;
      /* Look up one key from this image first, so its target_mem_desc can
	 be freed after all of the image's keys are removed.  */
      if (num_funcs > 0)
	{
	  k.host_start = (uintptr_t) host_func_table[0];
	  k.host_end = k.host_start + 1;
	  node = splay_tree_lookup (&devicep->mem_map, &k);
	}
      else if (num_vars > 0)
	{
	  k.host_start = (uintptr_t) host_var_table[0];
	  k.host_end = k.host_start + (uintptr_t) host_var_table[1];
	  node = splay_tree_lookup (&devicep->mem_map, &k);
	}

      for (j = 0; j < num_funcs; j++)
	{
	  k.host_start = (uintptr_t) host_func_table[j];
	  k.host_end = k.host_start + 1;
	  splay_tree_remove (&devicep->mem_map, &k);
	}

      for (j = 0; j < num_vars; j++)
	{
	  k.host_start = (uintptr_t) host_var_table[j * 2];
	  k.host_end = k.host_start + (uintptr_t) host_var_table[j * 2 + 1];
	  splay_tree_remove (&devicep->mem_map, &k);
	}

      if (node)
	{
	  /* NOTE(review): free (node) appears to assume NODE is the first
	     element of node->tgt->array (the keys were bulk-allocated in
	     gomp_offload_image_to_device) — confirm against the allocator
	     before relying on this.  */
	  free (node->tgt);
	  free (node);
	}

      gomp_mutex_unlock (&devicep->lock);
    }

  /* Remove image from array of pending images.  */
  for (i = 0; i < num_offload_images; i++)
    if (offload_images[i].target_data == target_data)
      {
	/* Order is not preserved: the last entry fills the hole.  */
	offload_images[i] = offload_images[--num_offload_images];
	break;
      }

  gomp_mutex_unlock (&register_lock);
}

/* This function initializes the target device, specified by DEVICEP.  DEVICEP
   must be locked on entry, and remains locked on return.  */

attribute_hidden void
gomp_init_device (struct gomp_device_descr *devicep)
{
  int i;
  devicep->init_device_func (devicep->target_id);

  /* Load to device all images registered by the moment.  */
  for (i = 0; i < num_offload_images; i++)
    {
      struct offload_image_descr *image = &offload_images[i];
      if (image->type == devicep->type)
	gomp_offload_image_to_device (devicep, image->host_table,
				      image->target_data, false);
    }

  devicep->is_initialized = true;
}

/* Free address mapping tables.  MM must be locked on entry, and remains
   locked on return.
*/

attribute_hidden void
gomp_free_memmap (struct splay_tree_s *mem_map)
{
  /* Repeatedly free the target_mem_desc of the current root until the
     tree is empty.  */
  while (mem_map->root)
    {
      struct target_mem_desc *tgt = mem_map->root->key.tgt;

      splay_tree_remove (mem_map, &mem_map->root->key);
      free (tgt->array);
      free (tgt);
    }
}

/* This function de-initializes the target device, specified by DEVICEP.
   DEVICEP must be locked on entry, and remains locked on return.  */

attribute_hidden void
gomp_fini_device (struct gomp_device_descr *devicep)
{
  if (devicep->is_initialized)
    devicep->fini_device_func (devicep->target_id);

  devicep->is_initialized = false;
}

/* Called when encountering a target directive.  If DEVICE is
   GOMP_DEVICE_ICV, it means use device-var ICV.  If it is
   GOMP_DEVICE_HOST_FALLBACK (or any value larger than last available hw
   device), use host fallback.  FN is address of host code, UNUSED is part of
   the current ABI, but we're not actually using it.  HOSTADDRS, SIZES and
   KINDS are arrays with MAPNUM entries, with addresses of the host objects,
   sizes of the host objects (resp. for pointer kind pointer bias and assumed
   sizeof (void *) size) and kinds.  */

void
GOMP_target (int device, void (*fn) (void *), const void *unused,
	     size_t mapnum, void **hostaddrs, size_t *sizes,
	     unsigned char *kinds)
{
  struct gomp_device_descr *devicep = resolve_device (device);

  if (devicep == NULL
      || !(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400))
    {
      /* Host fallback.
*/
      /* Run FN on the host with a fresh (mostly zeroed) thread state,
	 preserving only the place binding, then restore the old state.  */
      struct gomp_thread old_thr, *thr = gomp_thread ();
      old_thr = *thr;
      memset (thr, '\0', sizeof (*thr));
      if (gomp_places_list)
	{
	  thr->place = old_thr.place;
	  thr->ts.place_partition_len = gomp_places_list_len;
	}
      fn (hostaddrs);
      gomp_free_thread (thr);
      *thr = old_thr;
      return;
    }

  gomp_mutex_lock (&devicep->lock);
  if (!devicep->is_initialized)
    gomp_init_device (devicep);
  gomp_mutex_unlock (&devicep->lock);

  void *fn_addr;

  if (devicep->capabilities & GOMP_OFFLOAD_CAP_NATIVE_EXEC)
    /* Device executes host code directly; no lookup needed.  */
    fn_addr = (void *) fn;
  else
    {
      gomp_mutex_lock (&devicep->lock);
      /* Functions are mapped as 1-byte ranges keyed by host address.  */
      struct splay_tree_key_s k;
      k.host_start = (uintptr_t) fn;
      k.host_end = k.host_start + 1;
      splay_tree_key tgt_fn = splay_tree_lookup (&devicep->mem_map, &k);
      if (tgt_fn == NULL)
	{
	  gomp_mutex_unlock (&devicep->lock);
	  gomp_fatal ("Target function wasn't mapped");
	}
      gomp_mutex_unlock (&devicep->lock);

      fn_addr = (void *) tgt_fn->tgt_offset;
    }

  struct target_mem_desc *tgt_vars
    = gomp_map_vars (devicep, mapnum, hostaddrs, NULL, sizes, kinds, false,
		     true);
  struct gomp_thread old_thr, *thr = gomp_thread ();
  old_thr = *thr;
  memset (thr, '\0', sizeof (*thr));
  if (gomp_places_list)
    {
      thr->place = old_thr.place;
      thr->ts.place_partition_len = gomp_places_list_len;
    }
  devicep->run_func (devicep->target_id, fn_addr,
		     (void *) tgt_vars->tgt_start);
  gomp_free_thread (thr);
  *thr = old_thr;
  gomp_unmap_vars (tgt_vars, true);
}

/* Handle #pragma omp target data: map variables and push the descriptor
   onto the ICV's target_data chain.  */

void
GOMP_target_data (int device, const void *unused, size_t mapnum,
		  void **hostaddrs, size_t *sizes, unsigned char *kinds)
{
  struct gomp_device_descr *devicep = resolve_device (device);

  if (devicep == NULL
      || !(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400))
    {
      /* Host fallback.  */
      struct gomp_task_icv *icv = gomp_icv (false);
      if (icv->target_data)
	{
	  /* Even when doing a host fallback, if there are any active
	     #pragma omp target data constructs, need to remember the new
	     #pragma omp target data, otherwise GOMP_target_end_data would get
	     out of sync.
*/
	  /* Push an empty descriptor so end_data pops symmetrically.  */
	  struct target_mem_desc *tgt
	    = gomp_map_vars (NULL, 0, NULL, NULL, NULL, NULL, false, false);
	  tgt->prev = icv->target_data;
	  icv->target_data = tgt;
	}
      return;
    }

  gomp_mutex_lock (&devicep->lock);
  if (!devicep->is_initialized)
    gomp_init_device (devicep);
  gomp_mutex_unlock (&devicep->lock);

  struct target_mem_desc *tgt
    = gomp_map_vars (devicep, mapnum, hostaddrs, NULL, sizes, kinds, false,
		     false);
  struct gomp_task_icv *icv = gomp_icv (true);
  tgt->prev = icv->target_data;
  icv->target_data = tgt;
}

/* Pop and unmap the innermost #pragma omp target data descriptor.  */

void
GOMP_target_end_data (void)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  if (icv->target_data)
    {
      struct target_mem_desc *tgt = icv->target_data;
      icv->target_data = tgt->prev;
      gomp_unmap_vars (tgt, true);
    }
}

/* Handle #pragma omp target update: no-op on host fallback, otherwise
   copy mapped data to/from the device per KINDS.  */

void
GOMP_target_update (int device, const void *unused, size_t mapnum,
		    void **hostaddrs, size_t *sizes, unsigned char *kinds)
{
  struct gomp_device_descr *devicep = resolve_device (device);

  if (devicep == NULL
      || !(devicep->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400))
    return;

  gomp_mutex_lock (&devicep->lock);
  if (!devicep->is_initialized)
    gomp_init_device (devicep);
  gomp_mutex_unlock (&devicep->lock);

  gomp_update (devicep, mapnum, hostaddrs, sizes, kinds, false);
}

/* Handle #pragma omp teams: record the thread-limit ICV (clamped to
   UINT_MAX); NUM_TEAMS is accepted but unused here.  */

void
GOMP_teams (unsigned int num_teams, unsigned int thread_limit)
{
  if (thread_limit)
    {
      struct gomp_task_icv *icv = gomp_icv (true);
      icv->thread_limit_var
	= thread_limit > INT_MAX ? UINT_MAX : thread_limit;
    }
  (void) num_teams;
}

#ifdef PLUGIN_SUPPORT

/* This function tries to load a plugin for DEVICE.  Name of plugin is passed
   in PLUGIN_NAME.  The handles of the found functions are stored in the
   corresponding fields of DEVICE.  The function returns TRUE on success and
   FALSE otherwise.  */

static bool
gomp_load_plugin_for_device (struct gomp_device_descr *device,
			     const char *plugin_name)
{
  const char *err = NULL, *last_missing = NULL;
  int optional_present, optional_total;

  /* Clear any existing error.
*/
  dlerror ();

  void *plugin_handle = dlopen (plugin_name, RTLD_LAZY);
  if (!plugin_handle)
    {
      err = dlerror ();
      goto out;
    }

  /* Check if all required functions are available in the plugin and store
     their handlers.  */
#define DLSYM(f)							\
  do									\
    {									\
      device->f##_func = dlsym (plugin_handle, "GOMP_OFFLOAD_" #f);	\
      err = dlerror ();							\
      if (err != NULL)							\
	goto out;							\
    }									\
  while (0)
  /* Similar, but missing functions are not an error.  */
#define DLSYM_OPT(f, n)							\
  do									\
    {									\
      const char *tmp_err;						\
      device->f##_func = dlsym (plugin_handle, "GOMP_OFFLOAD_" #n);	\
      tmp_err = dlerror ();						\
      if (tmp_err == NULL)						\
	optional_present++;						\
      else								\
	last_missing = #n;						\
      optional_total++;							\
    }									\
  while (0)

  DLSYM (get_name);
  DLSYM (get_caps);
  DLSYM (get_type);
  DLSYM (get_num_devices);
  DLSYM (init_device);
  DLSYM (fini_device);
  DLSYM (load_image);
  DLSYM (unload_image);
  DLSYM (alloc);
  DLSYM (free);
  DLSYM (dev2host);
  DLSYM (host2dev);
  device->capabilities = device->get_caps_func ();
  if (device->capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
    DLSYM (run);
  if (device->capabilities & GOMP_OFFLOAD_CAP_OPENACC_200)
    {
      optional_present = optional_total = 0;
      DLSYM_OPT (openacc.exec, openacc_parallel);
      DLSYM_OPT (openacc.register_async_cleanup,
		 openacc_register_async_cleanup);
      DLSYM_OPT (openacc.async_test, openacc_async_test);
      DLSYM_OPT (openacc.async_test_all, openacc_async_test_all);
      DLSYM_OPT (openacc.async_wait, openacc_async_wait);
      DLSYM_OPT (openacc.async_wait_async, openacc_async_wait_async);
      DLSYM_OPT (openacc.async_wait_all, openacc_async_wait_all);
      DLSYM_OPT (openacc.async_wait_all_async, openacc_async_wait_all_async);
      DLSYM_OPT (openacc.async_set_async, openacc_async_set_async);
      DLSYM_OPT (openacc.create_thread_data, openacc_create_thread_data);
      DLSYM_OPT (openacc.destroy_thread_data, openacc_destroy_thread_data);
      /* Require all the OpenACC handlers if we have
	 GOMP_OFFLOAD_CAP_OPENACC_200.
*/
      if (optional_present != optional_total)
	{
	  err = "plugin missing OpenACC handler function";
	  goto out;
	}
      optional_present = optional_total = 0;
      DLSYM_OPT (openacc.cuda.get_current_device,
		 openacc_get_current_cuda_device);
      DLSYM_OPT (openacc.cuda.get_current_context,
		 openacc_get_current_cuda_context);
      DLSYM_OPT (openacc.cuda.get_stream, openacc_get_cuda_stream);
      DLSYM_OPT (openacc.cuda.set_stream, openacc_set_cuda_stream);
      /* Make sure all the CUDA functions are there if any of them are.  */
      if (optional_present && optional_present != optional_total)
	{
	  err = "plugin missing OpenACC CUDA handler function";
	  goto out;
	}
    }
#undef DLSYM
#undef DLSYM_OPT

 out:
  if (err != NULL)
    {
      gomp_error ("while loading %s: %s", plugin_name, err);
      if (last_missing)
	gomp_error ("missing function was %s", last_missing);
      if (plugin_handle)
	dlclose (plugin_handle);
    }
  return err == NULL;
}

/* This function initializes the runtime needed for offloading.
   It parses the list of offload targets and tries to load the plugins for
   these targets.  On return, the variables NUM_DEVICES and NUM_DEVICES_OPENMP
   will be set, and the array DEVICES initialized, containing descriptors for
   corresponding devices, first the GOMP_OFFLOAD_CAP_OPENMP_400 ones, follows
   by the others.  */

static void
gomp_target_init (void)
{
  const char *prefix ="libgomp-plugin-";
  const char *suffix = SONAME_SUFFIX (1);
  const char *cur, *next;
  char *plugin_name;
  int i, new_num_devices;

  num_devices = 0;
  devices = NULL;

  /* OFFLOAD_TARGETS is a comma-separated list of configured targets.  */
  cur = OFFLOAD_TARGETS;
  if (*cur)
    do
      {
	struct gomp_device_descr current_device;

	next = strchr (cur, ',');

	plugin_name = (char *) malloc (1 + (next ? next - cur : strlen (cur))
				       + strlen (prefix) + strlen (suffix));
	if (!plugin_name)
	  {
	    num_devices = 0;
	    break;
	  }

	strcpy (plugin_name, prefix);
	strncat (plugin_name, cur, next ?
		 next - cur : strlen (cur));
	strcat (plugin_name, suffix);

	if (gomp_load_plugin_for_device (&current_device, plugin_name))
	  {
	    new_num_devices = current_device.get_num_devices_func ();
	    if (new_num_devices >= 1)
	      {
		/* Augment DEVICES and NUM_DEVICES.  */
		/* NOTE(review): on realloc failure the old DEVICES block is
		   leaked — presumably acceptable during one-time init;
		   confirm.  */
		devices = realloc (devices, (num_devices + new_num_devices)
				   * sizeof (struct gomp_device_descr));
		if (!devices)
		  {
		    num_devices = 0;
		    free (plugin_name);
		    break;
		  }
		current_device.name = current_device.get_name_func ();
		/* current_device.capabilities has already been set.  */
		current_device.type = current_device.get_type_func ();
		current_device.mem_map.root = NULL;
		current_device.is_initialized = false;
		current_device.openacc.data_environ = NULL;
		/* One descriptor per physical device of this plugin.  */
		for (i = 0; i < new_num_devices; i++)
		  {
		    current_device.target_id = i;
		    devices[num_devices] = current_device;
		    gomp_mutex_init (&devices[num_devices].lock);
		    num_devices++;
		  }
	      }
	  }

	free (plugin_name);
	cur = next + 1;
      }
    while (next);

  /* In DEVICES, sort the GOMP_OFFLOAD_CAP_OPENMP_400 ones first, and set
     NUM_DEVICES_OPENMP.  */
  struct gomp_device_descr *devices_s
    = malloc (num_devices * sizeof (struct gomp_device_descr));
  /* NOTE(review): when this malloc fails, NUM_DEVICES is zeroed so the
     loops below do nothing and DEVICES ends up NULL — verify no caller
     assumes a non-NULL DEVICES.  */
  if (!devices_s)
    {
      num_devices = 0;
      free (devices);
      devices = NULL;
    }
  num_devices_openmp = 0;
  for (i = 0; i < num_devices; i++)
    if (devices[i].capabilities & GOMP_OFFLOAD_CAP_OPENMP_400)
      devices_s[num_devices_openmp++] = devices[i];
  int num_devices_after_openmp = num_devices_openmp;
  for (i = 0; i < num_devices; i++)
    if (!(devices[i].capabilities & GOMP_OFFLOAD_CAP_OPENMP_400))
      devices_s[num_devices_after_openmp++] = devices[i];
  free (devices);
  devices = devices_s;

  for (i = 0; i < num_devices; i++)
    {
      /* The 'devices' array can be moved (by the realloc call) until we have
	 found all the plugins, so registering with the OpenACC runtime (which
	 takes a copy of the pointer argument) must be delayed until now.
*/
      if (devices[i].capabilities & GOMP_OFFLOAD_CAP_OPENACC_200)
	goacc_register (&devices[i]);
    }
}

#else /* PLUGIN_SUPPORT */
/* If dlfcn.h is unavailable we always fallback to host execution.
   GOMP_target* routines are just stubs for this case.  */
static void
gomp_target_init (void)
{
}
#endif /* PLUGIN_SUPPORT */
bfs_csr_mt.c
#include "graph_defs.h"
#include "prefetcher.h"

/* Per-vertex BFS state: a visited flag plus an intrusive "next" link used
   to thread vertices into the BFS work queues.  */
typedef struct bfs_metadata_st {
  char touched;
  volatile unsigned long queue_next;
} bfs_metadata_t;

/* Head of the current BFS frontier queue (ULONG_MAX == empty).  */
static volatile unsigned long queue_head = ULONG_MAX;
/* Vertex currently being expanded; read by the sequential prefetcher.  */
static volatile unsigned long vertex_position = 0;
static bfs_metadata_t *metadata;
static csr_t * volatile graph;

/* Prefetch tuning knobs, overridable via CMAX/CMIN in main().  */
unsigned long MAX_CACHE = ULONG_MAX;
long MIN_CACHE = 0;
/* Vertices popped from the queue so far (shared with the prefetcher).  */
unsigned long visited = 0;

/* Prefetcher callback: walk ahead of the BFS queue and fill LAF with up to
   IFT page numbers of adjacency-list data the BFS is about to touch.
   OLD_HOQ remembers the walk position between calls; the walk restarts
   from the live queue head when it falls too far behind (or is empty).  */
void prefetcher_random_callback(unsigned long *laf,
				unsigned long laf_size,
				unsigned long ift)
{
  static unsigned long old_hoq = ULONG_MAX;
  unsigned long current_hoq = ULONG_MAX;
  static unsigned long ra_depth = 0;
  static char preload = 0;
  static long pf_visited = 0;
  unsigned long entries = 0;
  /* Fill in inner-loop entries from BFS queue */
  /*
    if ((preload == 0) && (ra_depth > MAX_CACHE)) {
    preload = 1;
    current_hoq = ULONG_MAX;
    }
  */
  current_hoq = old_hoq;
  if ((current_hoq == ULONG_MAX) ||
      (((signed long) (pf_visited - visited)) > MIN_CACHE)
      /*|| (ra_depth > MIN_CACHE)*/) {
    current_hoq = queue_head;
    pf_visited = visited;
    // ra_depth = 0;
  }
  // if (((signed long)(pf_visited - visited)) > MIN_CACHE) return;
  /*
    if(current_hoq != ULONG_MAX) {
    current_hoq = metadata[current_hoq].queue_next;
    }
  */
  while (entries != ift && current_hoq != ULONG_MAX) {
    unsigned long page = graph->index[current_hoq];
    unsigned long end = graph->index[current_hoq + 1];
    page = page >> (ASSUME_PAGE_SHIFT + 3); /* offset is in bits !
*/
    end = end >> (ASSUME_PAGE_SHIFT + 3);
    // if(laf[HASH_MODULO(page, laf_size)] != page) {
    //   laf[HASH_MODULO(page, laf_size)] = page;
    // for (; page <= end; page++) {
    // if (entries==ift) break;
    laf[entries] = page;
    /* Third region of LAF holds the page-span length for this entry.  */
    if (end > page)
      laf[entries + (2 * laf_size)] = end - page;
    entries++;
    // }
    // }
    old_hoq = current_hoq;
    current_hoq = metadata[current_hoq].queue_next;
    pf_visited++;
  }
  ra_depth += entries;
}

/* Sequential prefetch callback: page number of the adjacency data of the
   vertex the BFS loop is currently positioned on.  */
unsigned long prefetcher_sequential_callback(unsigned long* aux_offset)
{
  unsigned long offset = graph->index[vertex_position];
  return offset >> (ASSUME_PAGE_SHIFT + 3);
}

unsigned long alist_entries_seen = 0;
// #pragma omp threadprivate(current_vertex)
unsigned long total_queue_demands = 0;
unsigned long queue_above_threshold = 0;
unsigned long queue_length = 0;

/* returns number of connected components */
static unsigned long bfs(csr_t *graph, unsigned long start_node)
{
  unsigned long i;
  unsigned long components = 0;
  unsigned long queue_tail = ULONG_MAX;
  /* nq_* hold the next BFS level being built while queue_* is drained.  */
  unsigned long nq_head = ULONG_MAX;
  unsigned long nq_tail = ULONG_MAX;
  char* finished_flag = NULL;
  unsigned long time_comp, time_giant = 0, id_giant;
  i = start_node;
  /* Sweep all vertices (wrapping around) so every component is visited.  */
  do {
    vertex_position = i;
    if (metadata[i].touched == 0) {
      CLOCK_START(time_comp);
      metadata[i].touched = 1;
      components++;
      BFS_PUSH(nq_head, nq_tail, i, metadata);
      queue_length = 1;
    } else {
      i++;
      if (i >= graph->vertex_cnt)
	i = 0;
      continue;
    }
    /* One iteration per BFS level.  */
    while (nq_head != ULONG_MAX) {
      queue_head = nq_head;
      queue_tail = nq_tail;
      nq_head = ULONG_MAX;
      nq_tail = ULONG_MAX;
#pragma omp parallel default(shared)
      {
#pragma omp task default(shared)
	{
	  while (1) {
	    unsigned long current_vertex;
	    char finished = 0;
	    /* Pops must be serialized across workers.  */
#pragma omp critical (check_queue)
	    {
	      if (queue_head != ULONG_MAX) {
		current_vertex = BFS_POP(queue_head, queue_tail, metadata);
		visited++;
	      } else {
		current_vertex = ULONG_MAX;
	      }
	    }
	    if (current_vertex == ULONG_MAX)
	      break;
	    //fprintf(stderr, "V %ld %d\n", current_vertex,
	    //        omp_get_num_threads());
	    if (current_vertex != ULONG_MAX) {
	      unsigned long lq_head = ULONG_MAX;
	      unsigned long lq_tail = ULONG_MAX;
	      csr_edge_iterator_t iter;
	      csr_init_edge_iterator(graph, current_vertex, &iter);
	      /* Collect unvisited out-neighbours into a thread-local
		 queue; the CAS makes the visited-claim race-free.  */
	      while (csr_iter_step(graph, &iter) == 0) {
		if (!iter.incoming) {
		  unsigned long target = iter.neighbour;
		  //#pragma omp critical (atomicset)
		  {
		    if (__sync_bool_compare_and_swap(&(metadata[target].touched),0, 1)) {
		      //metadata[target].touched = 1;
		      BFS_PUSH(lq_head, lq_tail, target, metadata);
		      // fprintf(stderr, "T %ld %d\n", target,
		      //        omp_get_thread_num());
		    }
		  }
		}
	      }
	      /* Splice the local queue into the shared next-level queue.  */
#pragma omp critical (stitch)
	      {
		BFS_STITCH(nq_head, nq_tail, lq_head, lq_tail, metadata);
		// fprintf(stderr, "%ld %ld %ld %ld\n", nq_head, nq_tail, lq_head, lq_tail);
	      }
	    }
	  }
	}
      }
    }
    CLOCK_STOP(time_comp);
    /* Track the component that took longest to traverse.  */
    if (time_comp > time_giant) {
      time_giant = time_comp;
      id_giant = i;
      printf("Visited %ld\n", visited);
    }
    i = i + 1;
    if (i >= graph->vertex_cnt) {
      i = 0;
    }
  } while (i != start_node);
  // fprintf(stderr, "%ld %ld\n", visited, graph->vertex_cnt);
  assert(visited == graph->vertex_cnt);
  printf("TIME GIANT COMP %lu\n", time_giant);
  printf("ID GIANT COMP %lu\n", id_giant);
  return components;
}

/* Usage: bfs_csr_mt graph_name root_id.  Loads the CSR graph, runs BFS
   from root_id (optionally with the prefetcher), prints timing stats.  */
int main(int argc, char **argv)
{
  unsigned long time_bfs, time_total, components;
  CLOCK_START(time_total);
  if (argc < 3) {
    fprintf(stderr, "Usage %s graph_name root_id\n", argv[0]);
    exit(-1);
  }
#ifdef PREFETCHER
  /* CMAX/CMIN environment variables override the prefetch window.  */
  char *env_var;
  env_var = getenv("CMAX");
  if(env_var != NULL) {
    MAX_CACHE = atol(env_var);
  }
  env_var = getenv("CMIN");
  if(env_var != NULL) {
    MIN_CACHE = atol(env_var);
  }
  bind_master();
  init_prefetcher(prefetcher_random_callback, NULL);
  //              prefetcher_sequential_callback);
#endif
  graph = open_csr(argv[1]);
  metadata = (bfs_metadata_t*)
    map_anon_memory(graph->vertex_cnt * sizeof(bfs_metadata_t),
		    "vertex metadata");
  //balloon_inflate(); /* Simulate semi-em conditions */
  print_mlocked_memory();
  unsigned long root_id = atol(argv[2]);
  assert(root_id < graph->vertex_cnt);
  /* Perhaps mmap /dev/null instead ?
*/
  /* Zero all touched flags and queue links before the traversal.  */
  memset(metadata, 0, graph->vertex_cnt * sizeof(bfs_metadata_t));
#ifdef PREFETCHER
  launch_prefetch_thread(graph->fd_calist);
#endif
  struct rusage ru_begin;
  getrusage(RUSAGE_SELF, &ru_begin);
  CLOCK_START(time_bfs);
  components = bfs(graph, root_id);
  CLOCK_STOP(time_bfs);
  struct rusage ru_end;
  getrusage(RUSAGE_SELF, &ru_end);
#ifdef PREFETCHER
  terminate_prefetch_thread();
  destroy_prefetcher();
#endif
  munmap(metadata, graph->vertex_cnt * sizeof(bfs_metadata_t));
  close_csr(graph);
  CLOCK_STOP(time_total);
  printf("COMPONENTS %lu\n", components);
  printf("TIME BFS %lu\n", time_bfs);
  printf("TIME TOTAL %lu\n", time_total);
  print_rusage_stats(stdout, &ru_begin, &ru_end);
  /* NOTE(review): total_queue_demands is never incremented in this file,
     so this ratio is 0/0 (NaN) unless updated elsewhere — confirm.  */
  printf("F_THRESHOLD %f\n",
	 ((double) queue_above_threshold) / total_queue_demands);
  return 0;
}
GB_binop__bor_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bor_uint32) // A.*B function (eWiseMult): GB (_AemultB_08__bor_uint32) // A.*B function (eWiseMult): GB (_AemultB_02__bor_uint32) // A.*B function (eWiseMult): GB (_AemultB_04__bor_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bor_uint32) // A*D function (colscale): GB (_AxD__bor_uint32) // D*A function (rowscale): GB (_DxB__bor_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__bor_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__bor_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bor_uint32) // C=scalar+B GB (_bind1st__bor_uint32) // C=scalar+B' GB (_bind1st_tran__bor_uint32) // C=A+scalar GB (_bind2nd__bor_uint32) // C=A'+scalar GB (_bind2nd_tran__bor_uint32) // C type: uint32_t // A type: uint32_t // A pattern? 0 // B type: uint32_t // B pattern? 
0

// BinaryOp: cij = (aij) | (bij)

// The macros below parameterize the generic templates #include'd further
// down for the BOR / uint32 case.

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x) | (y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BOR || GxB_NO_UINT32 || GxB_NO_BOR_UINT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Not generated for BOR: this variant is compiled out (#if 0).
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__bor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // body comes from the generic template, expanded via the macros above
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bor_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable; retained from the generator's template
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__bor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t
*restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__bor_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bor_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only defined for eWiseUnion; eWiseAdd ignores them
    uint32_t alpha_scalar ;
    uint32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bor_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bor_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bor_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bor_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bor_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = GBX (Bx, p, false) ; Cx [p] = (x) | (bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bor_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij) | (y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x) | (aij) ; \ } GrB_Info GB (_bind1st_tran__bor_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij) | (y) ; \ } GrB_Info GB (_bind2nd_tran__bor_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
threads.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s // REQUIRES: ompt #include "callback.h" #include <omp.h> int main() { int x = 0; #pragma omp parallel num_threads(4) { #pragma omp atomic x++; } // CHECK: 0: NULL_POINTER=[[NULL:.*$]] // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_thread_begin: // CHECK-SAME: thread_type=ompt_thread_initial=1, thread_id=[[MASTER_ID]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_thread_end: // CHECK-SAME: thread_id=[[MASTER_ID]] // CHECK: {{^}}[[WORKER_ID1:[0-9]+]]: ompt_event_thread_begin: // CHECK-SAME: thread_type=ompt_thread_worker=2, thread_id=[[WORKER_ID1]] // CHECK: {{^}}[[WORKER_ID1]]: ompt_event_thread_end: // CHECK-SAME: thread_id=[[WORKER_ID1]] // CHECK: {{^}}[[WORKER_ID2:[0-9]+]]: ompt_event_thread_begin: // CHECK-SAME: thread_type=ompt_thread_worker=2, thread_id=[[WORKER_ID2]] // CHECK: {{^}}[[WORKER_ID2]]: ompt_event_thread_end: // CHECK-SAME: thread_id=[[WORKER_ID2]] // CHECK: {{^}}[[WORKER_ID3:[0-9]+]]: ompt_event_thread_begin: // CHECK-SAME: thread_type=ompt_thread_worker=2, thread_id=[[WORKER_ID3]] // CHECK: {{^}}[[WORKER_ID3]]: ompt_event_thread_end: // CHECK-SAME: thread_id=[[WORKER_ID3]] return 0; }
OpenMP_PiCalculate_1T.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/*
 * Midpoint-rule approximation of pi = integral of 4/(1+x^2) over [0,1].
 * The OpenMP reduction is deliberately pinned to a single thread
 * (num_threads(1)) so the reported time is the serial baseline.
 */
int num_steps=1000000;   /* number of integration slices */
double step, pi;         /* slice width and final result */

int main(){
    double t_begin = omp_get_wtime();

    step = 1.0 / (double) num_steps;

    double area = 0.0;   /* accumulated sum of slice heights */
#pragma omp parallel for num_threads(1) reduction(+:area)
    for (int k = 0; k < num_steps; k++){
        /* evaluate the integrand at the midpoint of slice k */
        double mid = (k + 0.5) * step;
        area += 4.0 / (1.0 + mid * mid);
    }
    pi = step * area;

    double t_end = omp_get_wtime();
    printf("Pi = %.10f\n", pi);
    printf("Execution time: %f second.\n", (t_end-t_begin));
    return 0;
}
distort.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD IIIII SSSSS TTTTT OOO RRRR TTTTT % % D D I SS T O O R R T % % D D I SSS T O O RRRR T % % D D I SS T O O R R T % % DDDD IIIII SSSSS T OOO R R T % % % % % % MagickCore Image Distortion Methods % % % % Software Design % % Cristy % % Anthony Thyssen % % June 2007 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/distort.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/image.h" #include "MagickCore/linked-list.h" #include "MagickCore/list.h" #include "MagickCore/matrix.h" #include "MagickCore/matrix-private.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/registry.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/shear.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" /* Numerous internal routines for image distortions. 
*/

/*
  Permute a user-ordered affine argument list (sx,ry,rx,sy,tx,ty) in place
  into the internal coefficient ordering c0,c2,c4,c1,c3,c5.
*/
static inline void AffineArgsToCoefficients(double *affine)
{
  /* map external sx,ry,rx,sy,tx,ty to internal c0,c2,c4,c1,c3,c5 */
  double tmp[4];  /* note indexes 0 and 5 remain unchanged */
  tmp[0]=affine[1]; tmp[1]=affine[2]; tmp[2]=affine[3]; tmp[3]=affine[4];
  affine[3]=tmp[0]; affine[1]=tmp[1]; affine[4]=tmp[2]; affine[2]=tmp[3];
}

/*
  Inverse permutation of AffineArgsToCoefficients(): restore the external
  sx,ry,rx,sy,tx,ty ordering from the internal coefficient ordering, in place.
*/
static inline void CoefficientsToAffineArgs(double *coeff)
{
  /* map  internal c0,c1,c2,c3,c4,c5  to  external sx,ry,rx,sy,tx,ty */
  double tmp[4];  /* note indexes 0 and 5 remain unchanged */
  tmp[0]=coeff[3]; tmp[1]=coeff[1]; tmp[2]=coeff[4]; tmp[3]=coeff[2];
  coeff[1]=tmp[0]; coeff[2]=tmp[1]; coeff[3]=tmp[2]; coeff[4]=tmp[3];
}

/*
  Compute the inverse of a 6-coefficient affine mapping.  A near-zero
  determinant is guarded by PerceptibleReciprocal() rather than by failing.
*/
static void InvertAffineCoefficients(const double *coeff,double *inverse)
{
  /* From "Digital Image Warping" by George Wolberg, page 50 */
  double determinant;

  determinant=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[1]*coeff[3]);
  inverse[0]=determinant*coeff[4];
  inverse[1]=determinant*(-coeff[1]);
  inverse[2]=determinant*(coeff[1]*coeff[5]-coeff[2]*coeff[4]);
  inverse[3]=determinant*(-coeff[3]);
  inverse[4]=determinant*coeff[0];
  inverse[5]=determinant*(coeff[2]*coeff[3]-coeff[0]*coeff[5]);
}

/*
  Compute the inverse of an 8-coefficient perspective (projective) mapping,
  with the same PerceptibleReciprocal() guard as the affine case above.
*/
static void InvertPerspectiveCoefficients(const double *coeff,
  double *inverse)
{
  /* From "Digital Image Warping" by George Wolberg, page 53 */
  double determinant;

  determinant=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[3]*coeff[1]);
  inverse[0]=determinant*(coeff[4]-coeff[7]*coeff[5]);
  inverse[1]=determinant*(coeff[7]*coeff[2]-coeff[1]);
  inverse[2]=determinant*(coeff[1]*coeff[5]-coeff[4]*coeff[2]);
  inverse[3]=determinant*(coeff[6]*coeff[5]-coeff[3]);
  inverse[4]=determinant*(coeff[0]-coeff[6]*coeff[2]);
  inverse[5]=determinant*(coeff[3]*coeff[2]-coeff[0]*coeff[5]);
  inverse[6]=determinant*(coeff[3]*coeff[7]-coeff[6]*coeff[4]);
  inverse[7]=determinant*(coeff[6]*coeff[1]-coeff[0]*coeff[7]);
}

/*
 * Polynomial Term Defining Functions
 *
 * Order must either be an integer, or 1.5 to produce
 * the 2 dimensional polynomial function...
 *    affine     1   (3)      u = c0 + c1*x + c2*y
 *    bilinear   1.5 (4)      u = '' + c3*x*y
 *    quadratic  2   (6)      u = '' + c4*x*x + c5*y*y
 *    cubic      3   (10)     u = '' + c6*x^3 + c7*x*x*y + c8*x*y*y + c9*y^3
 *    quartic    4   (15)     u = '' + c10*x^4 + ... + c14*y^4
 *    quintic    5   (21)     u = '' + c15*x^5 + ... + c20*y^5
 * number in parenthesis minimum number of points needed.
 * Anything beyond quintic, has not been implemented until
 * a more automated way of determining terms is found.

 * Note the slight re-ordering of the terms for a quadratic polynomial
 * which is to allow the use of a bi-linear (order=1.5) polynomial.
 * All the later polynomials are ordered simply from x^N to y^N
 */
static size_t poly_number_terms(double order)
{
 /* Return the number of terms for a 2d polynomial of the given order;
    0 flags an invalid order (valid: integers 1..5, or exactly 1.5) */
  if ( order < 1 || order > 5 ||
       ( order != floor(order) && (order-1.5) > MagickEpsilon) )
    return 0; /* invalid polynomial order */
  return((size_t) floor((order+1)*(order+2)/2));
}

static double poly_basis_fn(ssize_t n, double x, double y)
{
  /* Evaluate polynomial basis term n at (x,y); term order matches the
     table above (constant, x, y, x*y, x^2, y^2, then x^N..y^N). */
  switch(n) {
    case  0:  return( 1.0 ); /* constant */
    case  1:  return( x );
    case  2:  return( y ); /* affine          order = 1   terms = 3 */
    case  3:  return( x*y ); /* bilinear      order = 1.5 terms = 4 */
    case  4:  return( x*x );
    case  5:  return( y*y ); /* quadratic     order = 2   terms = 6 */
    case  6:  return( x*x*x );
    case  7:  return( x*x*y );
    case  8:  return( x*y*y );
    case  9:  return( y*y*y ); /* cubic       order = 3   terms = 10 */
    case 10:  return( x*x*x*x );
    case 11:  return( x*x*x*y );
    case 12:  return( x*x*y*y );
    case 13:  return( x*y*y*y );
    case 14:  return( y*y*y*y ); /* quartic   order = 4   terms = 15 */
    case 15:  return( x*x*x*x*x );
    case 16:  return( x*x*x*x*y );
    case 17:  return( x*x*x*y*y );
    case 18:  return( x*x*y*y*y );
    case 19:  return( x*y*y*y*y );
    case 20:  return( y*y*y*y*y ); /* quintic order = 5   terms = 21 */
  }
  return( 0 ); /* should never happen */
}

static const char *poly_basis_str(ssize_t n)
{
  /* Return a printable suffix for basis term n (used when reporting the
     fitted polynomial as an FX-style expression in ii/jj). */
  switch(n) {
    case  0:  return(""); /* constant */
    case  1:  return("*ii");
    case  2:  return("*jj"); /* affine      order = 1   terms = 3 */
    case  3:  return("*ii*jj"); /* bilinear order = 1.5 terms = 4 */
    case  4:  return("*ii*ii");
    case  5:  return("*jj*jj"); /* quadratic order = 2  terms = 6 */
    case  6:  return("*ii*ii*ii");
    case  7:  return("*ii*ii*jj");
    case  8:  return("*ii*jj*jj");
    case  9:  return("*jj*jj*jj"); /* cubic   order = 3  terms = 10 */
    case 10:  return("*ii*ii*ii*ii");
    case 11:  return("*ii*ii*ii*jj");
    case 12:  return("*ii*ii*jj*jj");
    case 13:  return("*ii*jj*jj*jj");
    case 14:  return("*jj*jj*jj*jj"); /* quartic order = 4 terms = 15 */
    case 15:  return("*ii*ii*ii*ii*ii");
    case 16:  return("*ii*ii*ii*ii*jj");
    case 17:  return("*ii*ii*ii*jj*jj");
    case 18:  return("*ii*ii*jj*jj*jj");
    case 19:  return("*ii*jj*jj*jj*jj");
    case 20:  return("*jj*jj*jj*jj*jj"); /* quintic order = 5 terms = 21 */
  }
  return( "UNKNOWN" ); /* should never happen */
}

static double poly_basis_dx(ssize_t n, double x, double y)
{
  /* Partial derivative with respect to x of basis term n at (x,y).
     (Constant factors are dropped; only the fitted coefficients matter.) */
  switch(n) {
    case  0:  return( 0.0 ); /* constant */
    case  1:  return( 1.0 );
    case  2:  return( 0.0 ); /* affine      order = 1   terms = 3 */
    case  3:  return( y ); /* bilinear     order = 1.5 terms = 4 */
    case  4:  return( x );
    case  5:  return( 0.0 ); /* quadratic   order = 2   terms = 6 */
    case  6:  return( x*x );
    case  7:  return( x*y );
    case  8:  return( y*y );
    case  9:  return( 0.0 ); /* cubic       order = 3   terms = 10 */
    case 10:  return( x*x*x );
    case 11:  return( x*x*y );
    case 12:  return( x*y*y );
    case 13:  return( y*y*y );
    case 14:  return( 0.0 ); /* quartic     order = 4   terms = 15 */
    case 15:  return( x*x*x*x );
    case 16:  return( x*x*x*y );
    case 17:  return( x*x*y*y );
    case 18:  return( x*y*y*y );
    case 19:  return( y*y*y*y );
    case 20:  return( 0.0 ); /* quintic     order = 5   terms = 21 */
  }
  return( 0.0 ); /* should never happen */
}

static double poly_basis_dy(ssize_t n, double x, double y)
{
  /* Partial derivative with respect to y of basis term n at (x,y);
     beyond the quadratic terms it equals poly_basis_dx(n-1,...). */
  switch(n) {
    case  0:  return( 0.0 ); /* constant */
    case  1:  return( 0.0 );
    case  2:  return( 1.0 ); /* affine      order = 1   terms = 3 */
    case  3:  return( x ); /* bilinear     order = 1.5 terms = 4 */
    case  4:  return( 0.0 );
    case  5:  return( y ); /* quadratic    order = 2   terms = 6 */
    default:  return( poly_basis_dx(n-1,x,y) ); /* weird but true */
  }
  /* NOTE: the only reason that last is not true for 'quadratic'
     is due to the re-arrangement of terms to allow for 'bilinear' */
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A f f i n e T r a n s f o r m I m a g e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AffineTransformImage() transforms an image as dictated by the affine
%  matrix.  It allocates the memory necessary for the new Image structure and
%  returns a pointer to the new image.
%
%  The format of the AffineTransformImage method is:
%
%      Image *AffineTransformImage(const Image *image,
%        AffineMatrix *affine_matrix,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o affine_matrix: the affine matrix.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AffineTransformImage(const Image *image,
  const AffineMatrix *affine_matrix,ExceptionInfo *exception)
{
  double
    distort[6];

  Image
    *deskew_image;

  /*
    Affine transform image: repackage the AffineMatrix as the 6-argument
    form expected by DistortImage() and delegate the actual resampling.
  */
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(affine_matrix != (AffineMatrix *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  distort[0]=affine_matrix->sx;
  distort[1]=affine_matrix->rx;
  distort[2]=affine_matrix->ry;
  distort[3]=affine_matrix->sy;
  distort[4]=affine_matrix->tx;
  distort[5]=affine_matrix->ty;
  /* "best-fit" geometry (MagickTrue) lets the result grow to hold the
     whole transformed image rather than clipping to the original canvas */
  deskew_image=DistortImage(image,AffineProjectionDistortion,6,distort,
    MagickTrue,exception);
  return(deskew_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     G e n e r a t e C o e f f i c i e n t s                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GenerateCoefficients() takes user provided input arguments and generates
%  the coefficients, needed to apply the specific distortion for either
%  distorting images (generally using control points) or generating a color
%  gradient from sparsely separated color points.
%
%  The format of the GenerateCoefficients() method is:
%
%      Image *GenerateCoefficients(const Image *image,DistortMethod method,
%        const size_t number_arguments,const double *arguments,
%        size_t number_values, ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image to be distorted.
%
%    o method: the method of image distortion/ sparse gradient
%
%    o number_arguments: the number of arguments given.
%
%    o arguments: the arguments for this distortion method.
% % o number_values: the style and format of given control points, (caller type) % 0: 2 dimensional mapping of control points (Distort) % Format: u,v,x,y where u,v is the 'source' of the % the color to be plotted, for DistortImage() % N: Interpolation of control points with N values (usally r,g,b) % Format: x,y,r,g,b mapping x,y to color values r,g,b % IN future, variable number of values may be given (1 to N) % % o exception: return any errors or warnings in this structure % % Note that the returned array of double values must be freed by the % calling method using RelinquishMagickMemory(). This however may change in % the future to require a more 'method' specific method. % % Because of this this method should not be classed as stable or used % outside other MagickCore library methods. */ static inline double MagickRound(double x) { /* Round the fraction to nearest integer. */ if ((x-floor(x)) < (ceil(x)-x)) return(floor(x)); return(ceil(x)); } static double *GenerateCoefficients(const Image *image, DistortMethod *method,const size_t number_arguments,const double *arguments, size_t number_values,ExceptionInfo *exception) { double *coeff; size_t i; size_t number_coeff, /* number of coefficients to return (array size) */ cp_size, /* number floating point numbers per control point */ cp_x,cp_y, /* the x,y indexes for control point */ cp_values; /* index of values for this control point */ /* number_values Number of values given per control point */ if ( number_values == 0 ) { /* Image distortion using control points (or other distortion) That is generate a mapping so that x,y->u,v given u,v,x,y */ number_values = 2; /* special case: two values of u,v */ cp_values = 0; /* the values i,j are BEFORE the destination CP x,y */ cp_x = 2; /* location of x,y in input control values */ cp_y = 3; /* NOTE: cp_values, also used for later 'reverse map distort' tests */ } else { cp_x = 0; /* location of x,y in input control values */ cp_y = 1; cp_values = 2; /* and the other 
values are after x,y */ /* Typically in this case the values are R,G,B color values */ } cp_size = number_values+2; /* each CP defintion involves this many numbers */ /* If not enough control point pairs are found for specific distortions fall back to Affine distortion (allowing 0 to 3 point pairs) */ if ( number_arguments < 4*cp_size && ( *method == BilinearForwardDistortion || *method == BilinearReverseDistortion || *method == PerspectiveDistortion ) ) *method = AffineDistortion; number_coeff=0; switch (*method) { case AffineDistortion: case RigidAffineDistortion: /* also BarycentricColorInterpolate: */ number_coeff=3*number_values; break; case PolynomialDistortion: /* number of coefficents depend on the given polynomal 'order' */ i = poly_number_terms(arguments[0]); number_coeff = 2 + i*number_values; if ( i == 0 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","Polynomial", "Invalid order, should be interger 1 to 5, or 1.5"); return((double *) NULL); } if ( number_arguments < 1+i*cp_size ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'require at least %.20g CPs'", "Polynomial", (double) i); return((double *) NULL); } break; case BilinearReverseDistortion: number_coeff=4*number_values; break; /* The rest are constants as they are only used for image distorts */ case BilinearForwardDistortion: number_coeff=10; /* 2*4 coeff plus 2 constants */ cp_x = 0; /* Reverse src/dest coords for forward mapping */ cp_y = 1; cp_values = 2; break; #if 0 case QuadraterialDistortion: number_coeff=19; /* BilinearForward + BilinearReverse */ #endif break; case ShepardsDistortion: number_coeff=1; /* The power factor to use */ break; case ArcDistortion: number_coeff=5; break; case ScaleRotateTranslateDistortion: case AffineProjectionDistortion: case Plane2CylinderDistortion: case Cylinder2PlaneDistortion: number_coeff=6; break; case PolarDistortion: case DePolarDistortion: 
number_coeff=8; break; case PerspectiveDistortion: case PerspectiveProjectionDistortion: number_coeff=9; break; case BarrelDistortion: case BarrelInverseDistortion: number_coeff=10; break; default: perror("unknown method given"); /* just fail assertion */ } /* allocate the array of coefficients needed */ coeff = (double *) AcquireQuantumMemory(number_coeff,sizeof(*coeff)); if (coeff == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "GenerateCoefficients"); return((double *) NULL); } /* zero out coefficients array */ for (i=0; i < number_coeff; i++) coeff[i] = 0.0; switch (*method) { case AffineDistortion: { /* Affine Distortion v = c0*x + c1*y + c2 for each 'value' given Input Arguments are sets of control points... For Distort Images u,v, x,y ... For Sparse Gradients x,y, r,g,b ... */ if ( number_arguments%cp_size != 0 || number_arguments < cp_size ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'require at least %.20g CPs'", "Affine", 1.0); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* handle special cases of not enough arguments */ if ( number_arguments == cp_size ) { /* Only 1 CP Set Given */ if ( cp_values == 0 ) { /* image distortion - translate the image */ coeff[0] = 1.0; coeff[2] = arguments[0] - arguments[2]; coeff[4] = 1.0; coeff[5] = arguments[1] - arguments[3]; } else { /* sparse gradient - use the values directly */ for (i=0; i<number_values; i++) coeff[i*3+2] = arguments[cp_values+i]; } } else { /* 2 or more points (usally 3) given. Solve a least squares simultaneous equation for coefficients. 
*/ double **matrix, **vectors, terms[3]; MagickBooleanType status; /* create matrix, and a fake vectors matrix */ matrix = AcquireMagickMatrix(3UL,3UL); vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors)); if (matrix == (double **) NULL || vectors == (double **) NULL) { matrix = RelinquishMagickMatrix(matrix, 3UL); vectors = (double **) RelinquishMagickMemory(vectors); coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((double *) NULL); } /* fake a number_values x3 vectors matrix from coefficients array */ for (i=0; i < number_values; i++) vectors[i] = &(coeff[i*3]); /* Add given control point pairs for least squares solving */ for (i=0; i < number_arguments; i+=cp_size) { terms[0] = arguments[i+cp_x]; /* x */ terms[1] = arguments[i+cp_y]; /* y */ terms[2] = 1; /* 1 */ LeastSquaresAddTerms(matrix,vectors,terms, &(arguments[i+cp_values]),3UL,number_values); } if ( number_arguments == 2*cp_size ) { /* Only two pairs were given, but we need 3 to solve the affine. Fake extra coordinates by rotating p1 around p0 by 90 degrees. 
x2 = x0 - (y1-y0) y2 = y0 + (x1-x0) */ terms[0] = arguments[cp_x] - ( arguments[cp_size+cp_y] - arguments[cp_y] ); /* x2 */ terms[1] = arguments[cp_y] + + ( arguments[cp_size+cp_x] - arguments[cp_x] ); /* y2 */ terms[2] = 1; /* 1 */ if ( cp_values == 0 ) { /* Image Distortion - rotate the u,v coordients too */ double uv2[2]; uv2[0] = arguments[0] - arguments[5] + arguments[1]; /* u2 */ uv2[1] = arguments[1] + arguments[4] - arguments[0]; /* v2 */ LeastSquaresAddTerms(matrix,vectors,terms,uv2,3UL,2UL); } else { /* Sparse Gradient - use values of p0 for linear gradient */ LeastSquaresAddTerms(matrix,vectors,terms, &(arguments[cp_values]),3UL,number_values); } } /* Solve for LeastSquares Coefficients */ status=GaussJordanElimination(matrix,vectors,3UL,number_values); matrix = RelinquishMagickMatrix(matrix, 3UL); vectors = (double **) RelinquishMagickMemory(vectors); if ( status == MagickFalse ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Unsolvable Matrix'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } } return(coeff); } case RigidAffineDistortion: { double inverse[6], **matrix, terms[5], *vectors[1]; MagickBooleanType status; /* Rigid affine (also known as a Euclidean transform), restricts affine coefficients to 4 (S, R, Tx, Ty) with Sy=Sx and Ry = -Rx so that one has only scale, rotation and translation. No skew. */ if (((number_arguments % cp_size) != 0) || (number_arguments < cp_size)) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'require at least %.20g CPs'", CommandOptionToMnemonic(MagickDistortOptions,*method),2.0); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* Rigid affine requires a 4x4 least-squares matrix (zeroed). 
*/ matrix=AcquireMagickMatrix(4UL,4UL); if (matrix == (double **) NULL) { coeff=(double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","%s", CommandOptionToMnemonic(MagickDistortOptions,*method)); return((double *) NULL); } /* Add control points for least squares solving. */ vectors[0]=(&(coeff[0])); for (i=0; i < number_arguments; i+=4) { terms[0]=arguments[i+0]; terms[1]=(-arguments[i+1]); terms[2]=1.0; terms[3]=0.0; LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+2]),4UL,1UL); terms[0]=arguments[i+1]; terms[1]=arguments[i+0]; terms[2]=0.0; terms[3]=1.0; LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+3]),4UL,1UL); } /* Solve for least-squares coefficients. */ status=GaussJordanElimination(matrix,vectors,4UL,1UL); matrix=RelinquishMagickMatrix(matrix,4UL); if (status == MagickFalse) { coeff=(double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Unsolvable Matrix'", CommandOptionToMnemonic(MagickDistortOptions,*method)); return((double *) NULL); } /* Convert (S, R, Tx, Ty) to an affine projection. */ inverse[0]=coeff[0]; inverse[1]=coeff[1]; inverse[2]=(-coeff[1]); inverse[3]=coeff[0]; inverse[4]=coeff[2]; inverse[5]=coeff[3]; AffineArgsToCoefficients(inverse); InvertAffineCoefficients(inverse,coeff); *method=AffineDistortion; return(coeff); } case AffineProjectionDistortion: { /* Arguments: Affine Matrix (forward mapping) Arguments sx, rx, ry, sy, tx, ty Where u = sx*x + ry*y + tx v = rx*x + sy*y + ty Returns coefficients (in there inverse form) ordered as... sx ry tx rx sy ty AffineProjection Distortion Notes... 
+ Will only work with a 2 number_values for Image Distortion + Can not be used for generating a sparse gradient (interpolation) */ double inverse[8]; if (number_arguments != 6) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Needs 6 coeff values'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } /* FUTURE: trap test for sx*sy-rx*ry == 0 (determinant = 0, no inverse) */ for(i=0; i<6UL; i++ ) inverse[i] = arguments[i]; AffineArgsToCoefficients(inverse); /* map into coefficents */ InvertAffineCoefficients(inverse, coeff); /* invert */ *method = AffineDistortion; return(coeff); } case ScaleRotateTranslateDistortion: { /* Scale, Rotate and Translate Distortion An alternative Affine Distortion Argument options, by number of arguments given: 7: x,y, sx,sy, a, nx,ny 6: x,y, s, a, nx,ny 5: x,y, sx,sy, a 4: x,y, s, a 3: x,y, a 2: s, a 1: a Where actions are (in order of application) x,y 'center' of transforms (default = image center) sx,sy scale image by this amount (default = 1) a angle of rotation (argument required) nx,ny move 'center' here (default = x,y or no movement) And convert to affine mapping coefficients ScaleRotateTranslate Distortion Notes... 
+ Does not use a set of CPs in any normal way + Will only work with a 2 number_valuesal Image Distortion + Cannot be used for generating a sparse gradient (interpolation) */ double cosine, sine, x,y,sx,sy,a,nx,ny; /* set default center, and default scale */ x = nx = (double)(image->columns)/2.0 + (double)image->page.x; y = ny = (double)(image->rows)/2.0 + (double)image->page.y; sx = sy = 1.0; switch ( number_arguments ) { case 0: coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Needs at least 1 argument'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); case 1: a = arguments[0]; break; case 2: sx = sy = arguments[0]; a = arguments[1]; break; default: x = nx = arguments[0]; y = ny = arguments[1]; switch ( number_arguments ) { case 3: a = arguments[2]; break; case 4: sx = sy = arguments[2]; a = arguments[3]; break; case 5: sx = arguments[2]; sy = arguments[3]; a = arguments[4]; break; case 6: sx = sy = arguments[2]; a = arguments[3]; nx = arguments[4]; ny = arguments[5]; break; case 7: sx = arguments[2]; sy = arguments[3]; a = arguments[4]; nx = arguments[5]; ny = arguments[6]; break; default: coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Too Many Arguments (7 or less)'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } break; } /* Trap if sx or sy == 0 -- image is scaled out of existance! 
*/ if ( fabs(sx) < MagickEpsilon || fabs(sy) < MagickEpsilon ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Zero Scale Given'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } /* Save the given arguments as an affine distortion */ a=DegreesToRadians(a); cosine=cos(a); sine=sin(a); *method = AffineDistortion; coeff[0]=cosine/sx; coeff[1]=sine/sx; coeff[2]=x-nx*coeff[0]-ny*coeff[1]; coeff[3]=(-sine)/sy; coeff[4]=cosine/sy; coeff[5]=y-nx*coeff[3]-ny*coeff[4]; return(coeff); } case PerspectiveDistortion: { /* Perspective Distortion (a ratio of affine distortions) p(x,y) c0*x + c1*y + c2 u = ------ = ------------------ r(x,y) c6*x + c7*y + 1 q(x,y) c3*x + c4*y + c5 v = ------ = ------------------ r(x,y) c6*x + c7*y + 1 c8 = Sign of 'r', or the denominator affine, for the actual image. This determines what part of the distorted image is 'ground' side of the horizon, the other part is 'sky' or invalid. Valid values are +1.0 or -1.0 only. Input Arguments are sets of control points... For Distort Images u,v, x,y ... For Sparse Gradients x,y, r,g,b ... Perspective Distortion Notes... + Can be thought of as ratio of 3 affine transformations + Not separatable: r() or c6 and c7 are used by both equations + All 8 coefficients must be determined simultaniously + Will only work with a 2 number_valuesal Image Distortion + Can not be used for generating a sparse gradient (interpolation) + It is not linear, but is simple to generate an inverse + All lines within an image remain lines. + but distances between points may vary. 
*/ double **matrix, *vectors[1], terms[8]; size_t cp_u = cp_values, cp_v = cp_values+1; MagickBooleanType status; if ( number_arguments%cp_size != 0 || number_arguments < cp_size*4 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'require at least %.20g CPs'", CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* fake 1x8 vectors matrix directly using the coefficients array */ vectors[0] = &(coeff[0]); /* 8x8 least-squares matrix (zeroed) */ matrix = AcquireMagickMatrix(8UL,8UL); if (matrix == (double **) NULL) { coeff=(double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((double *) NULL); } /* Add control points for least squares solving */ for (i=0; i < number_arguments; i+=4) { terms[0]=arguments[i+cp_x]; /* c0*x */ terms[1]=arguments[i+cp_y]; /* c1*y */ terms[2]=1.0; /* c2*1 */ terms[3]=0.0; terms[4]=0.0; terms[5]=0.0; terms[6]=-terms[0]*arguments[i+cp_u]; /* 1/(c6*x) */ terms[7]=-terms[1]*arguments[i+cp_u]; /* 1/(c7*y) */ LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_u]), 8UL,1UL); terms[0]=0.0; terms[1]=0.0; terms[2]=0.0; terms[3]=arguments[i+cp_x]; /* c3*x */ terms[4]=arguments[i+cp_y]; /* c4*y */ terms[5]=1.0; /* c5*1 */ terms[6]=-terms[3]*arguments[i+cp_v]; /* 1/(c6*x) */ terms[7]=-terms[4]*arguments[i+cp_v]; /* 1/(c7*y) */ LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_v]), 8UL,1UL); } /* Solve for LeastSquares Coefficients */ status=GaussJordanElimination(matrix,vectors,8UL,1UL); matrix = RelinquishMagickMatrix(matrix, 8UL); if ( status == MagickFalse ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Unsolvable Matrix'", CommandOptionToMnemonic(MagickDistortOptions, *method) 
); return((double *) NULL); } /* Calculate 9'th coefficient! The ground-sky determination. What is sign of the 'ground' in r() denominator affine function? Just use any valid image coordinate (first control point) in destination for determination of what part of view is 'ground'. */ coeff[8] = coeff[6]*arguments[cp_x] + coeff[7]*arguments[cp_y] + 1.0; coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0; return(coeff); } case PerspectiveProjectionDistortion: { /* Arguments: Perspective Coefficents (forward mapping) */ if (number_arguments != 8) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'Needs 8 coefficient values'", CommandOptionToMnemonic(MagickDistortOptions, *method)); return((double *) NULL); } /* FUTURE: trap test c0*c4-c3*c1 == 0 (determinate = 0, no inverse) */ InvertPerspectiveCoefficients(arguments, coeff); /* Calculate 9'th coefficient! The ground-sky determination. What is sign of the 'ground' in r() denominator affine function? Just use any valid image cocodinate in destination for determination. For a forward mapped perspective the images 0,0 coord will map to c2,c5 in the distorted image, so set the sign of denominator of that. */ coeff[8] = coeff[6]*arguments[2] + coeff[7]*arguments[5] + 1.0; coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0; *method = PerspectiveDistortion; return(coeff); } case BilinearForwardDistortion: case BilinearReverseDistortion: { /* Bilinear Distortion (Forward mapping) v = c0*x + c1*y + c2*x*y + c3; for each 'value' given This is actually a simple polynomial Distortion! The difference however is when we need to reverse the above equation to generate a BilinearForwardDistortion (see below). Input Arguments are sets of control points... For Distort Images u,v, x,y ... For Sparse Gradients x,y, r,g,b ... 
*/ double **matrix, **vectors, terms[4]; MagickBooleanType status; /* check the number of arguments */ if ( number_arguments%cp_size != 0 || number_arguments < cp_size*4 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'require at least %.20g CPs'", CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* create matrix, and a fake vectors matrix */ matrix = AcquireMagickMatrix(4UL,4UL); vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors)); if (matrix == (double **) NULL || vectors == (double **) NULL) { matrix = RelinquishMagickMatrix(matrix, 4UL); vectors = (double **) RelinquishMagickMemory(vectors); coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((double *) NULL); } /* fake a number_values x4 vectors matrix from coefficients array */ for (i=0; i < number_values; i++) vectors[i] = &(coeff[i*4]); /* Add given control point pairs for least squares solving */ for (i=0; i < number_arguments; i+=cp_size) { terms[0] = arguments[i+cp_x]; /* x */ terms[1] = arguments[i+cp_y]; /* y */ terms[2] = terms[0]*terms[1]; /* x*y */ terms[3] = 1; /* 1 */ LeastSquaresAddTerms(matrix,vectors,terms, &(arguments[i+cp_values]),4UL,number_values); } /* Solve for LeastSquares Coefficients */ status=GaussJordanElimination(matrix,vectors,4UL,number_values); matrix = RelinquishMagickMatrix(matrix, 4UL); vectors = (double **) RelinquishMagickMemory(vectors); if ( status == MagickFalse ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Unsolvable Matrix'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } if ( *method == BilinearForwardDistortion ) { /* Bilinear Forward 
Mapped Distortion The above least-squares solved for coefficents but in the forward direction, due to changes to indexing constants. i = c0*x + c1*y + c2*x*y + c3; j = c4*x + c5*y + c6*x*y + c7; where i,j are in the destination image, NOT the source. Reverse Pixel mapping however needs to use reverse of these functions. It required a full page of algbra to work out the reversed mapping formula, but resolves down to the following... c8 = c0*c5-c1*c4; c9 = 2*(c2*c5-c1*c6); // '2*a' in the quadratic formula i = i - c3; j = j - c7; b = c6*i - c2*j + c8; // So that a*y^2 + b*y + c == 0 c = c4*i - c0*j; // y = ( -b +- sqrt(bb - 4ac) ) / (2*a) r = b*b - c9*(c+c); if ( c9 != 0 ) y = ( -b + sqrt(r) ) / c9; else y = -c/b; x = ( i - c1*y) / ( c1 - c2*y ); NB: if 'r' is negative there is no solution! NB: the sign of the sqrt() should be negative if image becomes flipped or flopped, or crosses over itself. NB: techniqually coefficient c5 is not needed, anymore, but kept for completness. See Anthony Thyssen <A.Thyssen@griffith.edu.au> or Fred Weinhaus <fmw@alink.net> for more details. */ coeff[8] = coeff[0]*coeff[5] - coeff[1]*coeff[4]; coeff[9] = 2*(coeff[2]*coeff[5] - coeff[1]*coeff[6]); } return(coeff); } #if 0 case QuadrilateralDistortion: { /* Map a Quadrilateral to a unit square using BilinearReverse Then map that unit square back to the final Quadrilateral using BilinearForward. Input Arguments are sets of control points... For Distort Images u,v, x,y ... For Sparse Gradients x,y, r,g,b ... */ /* UNDER CONSTRUCTION */ return(coeff); } #endif case PolynomialDistortion: { /* Polynomial Distortion First two coefficents are used to hole global polynomal information c0 = Order of the polynimial being created c1 = number_of_terms in one polynomial equation Rest of the coefficients map to the equations.... v = c0 + c1*x + c2*y + c3*x*y + c4*x^2 + c5*y^2 + c6*x^3 + ... for each 'value' (number_values of them) given. 
As such total coefficients = 2 + number_terms * number_values Input Arguments are sets of control points... For Distort Images order [u,v, x,y] ... For Sparse Gradients order [x,y, r,g,b] ... Polynomial Distortion Notes... + UNDER DEVELOPMENT -- Do not expect this to remain as is. + Currently polynomial is a reversed mapped distortion. + Order 1.5 is fudged to map into a bilinear distortion. though it is not the same order as that distortion. */ double **matrix, **vectors, *terms; size_t nterms; /* number of polynomial terms per number_values */ ssize_t j; MagickBooleanType status; /* first two coefficients hold polynomial order information */ coeff[0] = arguments[0]; coeff[1] = (double) poly_number_terms(arguments[0]); nterms = (size_t) coeff[1]; /* create matrix, a fake vectors matrix, and least sqs terms */ matrix = AcquireMagickMatrix(nterms,nterms); vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors)); terms = (double *) AcquireQuantumMemory(nterms, sizeof(*terms)); if (matrix == (double **) NULL || vectors == (double **) NULL || terms == (double *) NULL ) { matrix = RelinquishMagickMatrix(matrix, nterms); vectors = (double **) RelinquishMagickMemory(vectors); terms = (double *) RelinquishMagickMemory(terms); coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((double *) NULL); } /* fake a number_values x3 vectors matrix from coefficients array */ for (i=0; i < number_values; i++) vectors[i] = &(coeff[2+i*nterms]); /* Add given control point pairs for least squares solving */ for (i=1; i < number_arguments; i+=cp_size) { /* NB: start = 1 not 0 */ for (j=0; j < (ssize_t) nterms; j++) terms[j] = poly_basis_fn(j,arguments[i+cp_x],arguments[i+cp_y]); LeastSquaresAddTerms(matrix,vectors,terms, &(arguments[i+cp_values]),nterms,number_values); } terms = (double *) RelinquishMagickMemory(terms); /* Solve for 
LeastSquares Coefficients */ status=GaussJordanElimination(matrix,vectors,nterms,number_values); matrix = RelinquishMagickMatrix(matrix, nterms); vectors = (double **) RelinquishMagickMemory(vectors); if ( status == MagickFalse ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Unsolvable Matrix'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } return(coeff); } case ArcDistortion: { /* Arc Distortion Args: arc_width rotate top_edge_radius bottom_edge_radius All but first argument are optional arc_width The angle over which to arc the image side-to-side rotate Angle to rotate image from vertical center top_radius Set top edge of source image at this radius bottom_radius Set bootom edge to this radius (radial scaling) By default, if the radii arguments are nor provided the image radius is calculated so the horizontal center-line is fits the given arc without scaling. The output image size is ALWAYS adjusted to contain the whole image, and an offset is given to position image relative to the 0,0 point of the origin, allowing users to use relative positioning onto larger background (via -flatten). The arguments are converted to these coefficients c0: angle for center of source image c1: angle scale for mapping to source image c2: radius for top of source image c3: radius scale for mapping source image c4: centerline of arc within source image Note the coefficients use a center angle, so asymptotic join is furthest from both sides of the source image. This also means that for arc angles greater than 360 the sides of the image will be trimmed equally. Arc Distortion Notes... 
+ Does not use a set of CPs + Will only work with Image Distortion + Can not be used for generating a sparse gradient (interpolation) */ if ( number_arguments >= 1 && arguments[0] < MagickEpsilon ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Arc Angle Too Small'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } if ( number_arguments >= 3 && arguments[2] < MagickEpsilon ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Outer Radius Too Small'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } coeff[0] = -MagickPI2; /* -90, place at top! */ if ( number_arguments >= 1 ) coeff[1] = DegreesToRadians(arguments[0]); else coeff[1] = MagickPI2; /* zero arguments - center is at top */ if ( number_arguments >= 2 ) coeff[0] += DegreesToRadians(arguments[1]); coeff[0] /= Magick2PI; /* normalize radians */ coeff[0] -= MagickRound(coeff[0]); coeff[0] *= Magick2PI; /* de-normalize back to radians */ coeff[3] = (double)image->rows-1; coeff[2] = (double)image->columns/coeff[1] + coeff[3]/2.0; if ( number_arguments >= 3 ) { if ( number_arguments >= 4 ) coeff[3] = arguments[2] - arguments[3]; else coeff[3] *= arguments[2]/coeff[2]; coeff[2] = arguments[2]; } coeff[4] = ((double)image->columns-1.0)/2.0; return(coeff); } case PolarDistortion: case DePolarDistortion: { /* (De)Polar Distortion (same set of arguments) Args: Rmax, Rmin, Xcenter,Ycenter, Afrom,Ato DePolar can also have the extra arguments of Width, Height Coefficients 0 to 5 is the sanatized version first 6 input args Coefficient 6 is the angle to coord ratio and visa-versa Coefficient 7 is the radius to coord ratio and visa-versa WARNING: It is possible for Radius max<min and/or Angle from>to */ if ( number_arguments == 3 || ( number_arguments > 6 && 
*method == PolarDistortion ) || number_arguments > 8 ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"InvalidArgument", "%s : number of arguments", CommandOptionToMnemonic(MagickDistortOptions, *method) ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* Rmax - if 0 calculate appropriate value */ if ( number_arguments >= 1 ) coeff[0] = arguments[0]; else coeff[0] = 0.0; /* Rmin - usally 0 */ coeff[1] = number_arguments >= 2 ? arguments[1] : 0.0; /* Center X,Y */ if ( number_arguments >= 4 ) { coeff[2] = arguments[2]; coeff[3] = arguments[3]; } else { /* center of actual image */ coeff[2] = (double)(image->columns)/2.0+image->page.x; coeff[3] = (double)(image->rows)/2.0+image->page.y; } /* Angle from,to - about polar center 0 is downward */ coeff[4] = -MagickPI; if ( number_arguments >= 5 ) coeff[4] = DegreesToRadians(arguments[4]); coeff[5] = coeff[4]; if ( number_arguments >= 6 ) coeff[5] = DegreesToRadians(arguments[5]); if ( fabs(coeff[4]-coeff[5]) < MagickEpsilon ) coeff[5] += Magick2PI; /* same angle is a full circle */ /* if radius 0 or negative, its a special value... 
*/ if ( coeff[0] < MagickEpsilon ) { /* Use closest edge if radius == 0 */ if ( fabs(coeff[0]) < MagickEpsilon ) { coeff[0]=MagickMin(fabs(coeff[2]-image->page.x), fabs(coeff[3]-image->page.y)); coeff[0]=MagickMin(coeff[0], fabs(coeff[2]-image->page.x-image->columns)); coeff[0]=MagickMin(coeff[0], fabs(coeff[3]-image->page.y-image->rows)); } /* furthest diagonal if radius == -1 */ if ( fabs(-1.0-coeff[0]) < MagickEpsilon ) { double rx,ry; rx = coeff[2]-image->page.x; ry = coeff[3]-image->page.y; coeff[0] = rx*rx+ry*ry; ry = coeff[3]-image->page.y-image->rows; coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry); rx = coeff[2]-image->page.x-image->columns; coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry); ry = coeff[3]-image->page.y; coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry); coeff[0] = sqrt(coeff[0]); } } /* IF Rmax <= 0 or Rmin < 0 OR Rmax < Rmin, THEN error */ if ( coeff[0] < MagickEpsilon || coeff[1] < -MagickEpsilon || (coeff[0]-coeff[1]) < MagickEpsilon ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : Invalid Radius", CommandOptionToMnemonic(MagickDistortOptions, *method) ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* converstion ratios */ if ( *method == PolarDistortion ) { coeff[6]=(double) image->columns/(coeff[5]-coeff[4]); coeff[7]=(double) image->rows/(coeff[0]-coeff[1]); } else { /* *method == DePolarDistortion */ coeff[6]=(coeff[5]-coeff[4])/image->columns; coeff[7]=(coeff[0]-coeff[1])/image->rows; } return(coeff); } case Cylinder2PlaneDistortion: case Plane2CylinderDistortion: { /* 3D Cylinder to/from a Tangential Plane Projection between a clinder and flat plain from a point on the center line of the cylinder. The two surfaces coincide in 3D space at the given centers of distortion (perpendicular to projection point) on both images. 
Args: FOV_arc_width Coefficents: FOV(radians), Radius, center_x,y, dest_center_x,y FOV (Field Of View) the angular field of view of the distortion, across the width of the image, in degrees. The centers are the points of least distortion in the input and resulting images. These centers are however determined later. Coeff 0 is the FOV angle of view of image width in radians Coeff 1 is calculated radius of cylinder. Coeff 2,3 center of distortion of input image Coefficents 4,5 Center of Distortion of dest (determined later) */ if ( arguments[0] < MagickEpsilon || arguments[0] > 160.0 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : Invalid FOV Angle", CommandOptionToMnemonic(MagickDistortOptions, *method) ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } coeff[0] = DegreesToRadians(arguments[0]); if ( *method == Cylinder2PlaneDistortion ) /* image is curved around cylinder, so FOV angle (in radians) * scales directly to image X coordinate, according to its radius. */ coeff[1] = (double) image->columns/coeff[0]; else /* radius is distance away from an image with this angular FOV */ coeff[1] = (double) image->columns / ( 2 * tan(coeff[0]/2) ); coeff[2] = (double)(image->columns)/2.0+image->page.x; coeff[3] = (double)(image->rows)/2.0+image->page.y; coeff[4] = coeff[2]; coeff[5] = coeff[3]; /* assuming image size is the same */ return(coeff); } case BarrelDistortion: case BarrelInverseDistortion: { /* Barrel Distortion Rs=(A*Rd^3 + B*Rd^2 + C*Rd + D)*Rd BarrelInv Distortion Rs=Rd/(A*Rd^3 + B*Rd^2 + C*Rd + D) Where Rd is the normalized radius from corner to middle of image Input Arguments are one of the following forms (number of arguments)... 
3: A,B,C 4: A,B,C,D 5: A,B,C X,Y 6: A,B,C,D X,Y 8: Ax,Bx,Cx,Dx Ay,By,Cy,Dy 10: Ax,Bx,Cx,Dx Ay,By,Cy,Dy X,Y Returns 10 coefficent values, which are de-normalized (pixel scale) Ax, Bx, Cx, Dx, Ay, By, Cy, Dy, Xc, Yc */ /* Radius de-normalization scaling factor */ double rscale = 2.0/MagickMin((double) image->columns,(double) image->rows); /* sanity check number of args must = 3,4,5,6,8,10 or error */ if ( (number_arguments < 3) || (number_arguments == 7) || (number_arguments == 9) || (number_arguments > 10) ) { coeff=(double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"InvalidArgument", "%s : number of arguments", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } /* A,B,C,D coefficients */ coeff[0] = arguments[0]; coeff[1] = arguments[1]; coeff[2] = arguments[2]; if ((number_arguments == 3) || (number_arguments == 5) ) coeff[3] = 1.0 - coeff[0] - coeff[1] - coeff[2]; else coeff[3] = arguments[3]; /* de-normalize the coefficients */ coeff[0] *= pow(rscale,3.0); coeff[1] *= rscale*rscale; coeff[2] *= rscale; /* Y coefficients: as given OR same as X coefficients */ if ( number_arguments >= 8 ) { coeff[4] = arguments[4] * pow(rscale,3.0); coeff[5] = arguments[5] * rscale*rscale; coeff[6] = arguments[6] * rscale; coeff[7] = arguments[7]; } else { coeff[4] = coeff[0]; coeff[5] = coeff[1]; coeff[6] = coeff[2]; coeff[7] = coeff[3]; } /* X,Y Center of Distortion (image coodinates) */ if ( number_arguments == 5 ) { coeff[8] = arguments[3]; coeff[9] = arguments[4]; } else if ( number_arguments == 6 ) { coeff[8] = arguments[4]; coeff[9] = arguments[5]; } else if ( number_arguments == 10 ) { coeff[8] = arguments[8]; coeff[9] = arguments[9]; } else { /* center of the image provided (image coodinates) */ coeff[8] = (double)image->columns/2.0 + image->page.x; coeff[9] = (double)image->rows/2.0 + image->page.y; } return(coeff); } case ShepardsDistortion: { /* Shepards Distortion input 
arguments are the coefficents! Just check the number of arguments is valid! Args: u1,v1, x1,y1, ... OR : u1,v1, r1,g1,c1, ... */ if ( number_arguments%cp_size != 0 || number_arguments < cp_size ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'requires CP's (4 numbers each)'", CommandOptionToMnemonic(MagickDistortOptions, *method)); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* User defined weighting power for Shepard's Method */ { const char *artifact=GetImageArtifact(image,"shepards:power"); if ( artifact != (const char *) NULL ) { coeff[0]=StringToDouble(artifact,(char **) NULL) / 2.0; if ( coeff[0] < MagickEpsilon ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"InvalidArgument","%s", "-define shepards:power" ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } } else coeff[0]=1.0; /* Default power of 2 (Inverse Squared) */ } return(coeff); } default: break; } /* you should never reach this point */ perror("no method handler"); /* just fail assertion */ return((double *) NULL); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D i s t o r t R e s i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DistortResizeImage() resize image using the equivalent but slower image % distortion operator. The filter is applied using a EWA cylindrical % resampling. But like resize the final image size is limited to whole pixels % with no effects by virtual-pixels on the result. % % Note that images containing a transparency channel will be twice as slow to % resize as images one without transparency. % % The format of the DistortResizeImage method is: % % Image *DistortResizeImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
%
%    o columns: the number of columns in the resized image.
%
%    o rows: the number of rows in the resized image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *DistortResizeImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define DistortResizeImageTag  "Distort/Image"

  Image
    *resize_image,
    *tmp_image;

  RectangleInfo
    crop_area;

  double
    distort_args[12];

  VirtualPixelMethod
    vp_save;

  /*
    Distort resize image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    return((Image *) NULL);
  /*
    Do not short-circuit this resize if final image size is unchanged: the
    distort still resamples, which is the point of this slower resize.
    The affine arguments map the source width/height onto columns/rows.
  */
  (void) memset(distort_args,0,sizeof(distort_args));
  distort_args[4]=(double) image->columns;
  distort_args[6]=(double) columns;
  distort_args[9]=(double) image->rows;
  distort_args[11]=(double) rows;
  /*
    Save the callers virtual pixel method; it is restored on the result
    image before returning.
  */
  vp_save=GetImageVirtualPixelMethod(image);
  tmp_image=CloneImage(image,0,0,MagickTrue,exception);
  if (tmp_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageVirtualPixelMethod(tmp_image,TransparentVirtualPixelMethod,
    exception);
  if (image->alpha_trait == UndefinedPixelTrait)
    {
      /*
        Image has no alpha channel, so we are free to use it.
      */
      (void) SetImageAlphaChannel(tmp_image,SetAlphaChannel,exception);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if (resize_image == (Image *) NULL)
        return((Image *) NULL);
      (void) SetImageAlphaChannel(resize_image,OffAlphaChannel,exception);
    }
  else
    {
      /*
        Image has transparency so handle colors and alpha separately.
        Basically we need to separate the Virtual-Pixel alpha in the resized
        image, so only the actual original images alpha channel is used.

        Distort the alpha channel separately first.
      */
      Image
        *resize_alpha;

      (void) SetImageAlphaChannel(tmp_image,ExtractAlphaChannel,exception);
      (void) SetImageAlphaChannel(tmp_image,OpaqueAlphaChannel,exception);
      resize_alpha=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if (resize_alpha == (Image *) NULL)
        return((Image *) NULL);
      /*
        Distort the actual image containing alpha + VP alpha.
      */
      tmp_image=CloneImage(image,0,0,MagickTrue,exception);
      if (tmp_image == (Image *) NULL)
        {
          /*
            Fix: destroy resize_alpha on this error path too, otherwise it
            is leaked when the clone fails.
          */
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      (void) SetImageVirtualPixelMethod(tmp_image,
        TransparentVirtualPixelMethod,exception);
      resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args,
        MagickTrue,exception);
      tmp_image=DestroyImage(tmp_image);
      if (resize_image == (Image *) NULL)
        {
          resize_alpha=DestroyImage(resize_alpha);
          return((Image *) NULL);
        }
      /*
        Replace resize images alpha with the separately distorted alpha.
      */
      (void) SetImageAlphaChannel(resize_image,OffAlphaChannel,exception);
      (void) SetImageAlphaChannel(resize_alpha,OffAlphaChannel,exception);
      (void) CompositeImage(resize_image,resize_alpha,CopyAlphaCompositeOp,
        MagickTrue,0,0,exception);
      resize_alpha=DestroyImage(resize_alpha);
      resize_image->alpha_trait=image->alpha_trait;
      resize_image->compose=image->compose;
    }
  (void) SetImageVirtualPixelMethod(resize_image,vp_save,exception);
  /*
    Clean up the results of the Distortion: crop away any virtual-pixel
    border so the final image is exactly columns x rows with no page offset.
  */
  crop_area.width=columns;
  crop_area.height=rows;
  crop_area.x=0;
  crop_area.y=0;
  tmp_image=resize_image;
  resize_image=CropImage(tmp_image,&crop_area,exception);
  tmp_image=DestroyImage(tmp_image);
  if (resize_image != (Image *) NULL)
    {
      resize_image->page.width=0;
      resize_image->page.height=0;
    }
  return(resize_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D i s t o r t   I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DistortImage() distorts an image using various
distortion methods, by
%  mapping color lookups of the source image to a new destination image
%  usually of the same size as the source image, unless 'bestfit' is set to
%  true.
%
%  If 'bestfit' is enabled, and distortion allows it, the destination image is
%  adjusted to ensure the whole source 'image' will just fit within the final
%  destination image, which will be sized and offset accordingly.  Also in
%  many cases the virtual offset of the source image will be taken into
%  account in the mapping.
%
%  If the '-verbose' control option has been set print to standard error the
%  equivalent '-fx' formula with coefficients for the function, if practical.
%
%  The format of the DistortImage() method is:
%
%      Image *DistortImage(const Image *image,const DistortMethod method,
%        const size_t number_arguments,const double *arguments,
%        MagickBooleanType bestfit, ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image to be distorted.
%
%    o method: the method of image distortion.
%
%        ArcDistortion always ignores source image offset, and always
%        'bestfit' the destination image with the top left corner offset
%        relative to the polar mapping center.
%
%        Affine, Perspective, and Bilinear, do least squares fitting of the
%        distortion when more than the minimum number of control point pairs
%        are provided.
%
%        Perspective, and Bilinear, fall back to an Affine distortion when
%        less than 4 control point pairs are provided.  While Affine
%        distortions let you use any number of control point pairs, that is
%        Zero pairs is a No-Op (viewport only) distortion, one pair is a
%        translation and two pairs of control points do a
%        scale-rotate-translate, without any shearing.
%
%    o number_arguments: the number of arguments given.
%
%    o arguments: an array of floating point arguments for this method.
%
%    o bestfit: Attempt to 'bestfit' the size of the resulting image.
%        This also forces the resulting image to be a 'layered' virtual
%        canvas image.
Can be overridden using 'distort:viewport' setting. % % o exception: return any errors or warnings in this structure % % Extra Controls from Image meta-data (artifacts)... % % o "verbose" % Output to stderr alternatives, internal coefficents, and FX % equivalents for the distortion operation (if feasible). % This forms an extra check of the distortion method, and allows users % access to the internal constants IM calculates for the distortion. % % o "distort:viewport" % Directly set the output image canvas area and offest to use for the % resulting image, rather than use the original images canvas, or a % calculated 'bestfit' canvas. % % o "distort:scale" % Scale the size of the output canvas by this amount to provide a % method of Zooming, and for super-sampling the results. % % Other settings that can effect results include % % o 'interpolate' For source image lookups (scale enlargements) % % o 'filter' Set filter to use for area-resampling (scale shrinking). % Set to 'point' to turn off and use 'interpolate' lookup % instead % */ MagickExport Image *DistortImage(const Image *image, DistortMethod method, const size_t number_arguments,const double *arguments, MagickBooleanType bestfit,ExceptionInfo *exception) { #define DistortImageTag "Distort/Image" double *coeff, output_scaling; Image *distort_image; RectangleInfo geometry; /* geometry of the distorted space viewport */ MagickBooleanType viewport_given; PixelInfo invalid; /* the color to assign when distort result is invalid */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); /* Handle Special Compound Distortions */ if ( method == ResizeDistortion ) { if ( number_arguments != 2 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 
'%s'","Resize", "Invalid number of args: 2 only"); return((Image *) NULL); } distort_image=DistortResizeImage(image,(size_t)arguments[0], (size_t)arguments[1], exception); return(distort_image); } /* Convert input arguments (usually as control points for reverse mapping) into mapping coefficients to apply the distortion. Note that some distortions are mapped to other distortions, and as such do not require specific code after this point. */ coeff = GenerateCoefficients(image, &method, number_arguments, arguments, 0, exception); if ( coeff == (double *) NULL ) return((Image *) NULL); /* Determine the size and offset for a 'bestfit' destination. Usally the four corners of the source image is enough. */ /* default output image bounds, when no 'bestfit' is requested */ geometry.width=image->columns; geometry.height=image->rows; geometry.x=0; geometry.y=0; if ( method == ArcDistortion ) { bestfit = MagickTrue; /* always calculate a 'best fit' viewport */ } /* Work out the 'best fit', (required for ArcDistortion) */ if ( bestfit ) { PointInfo s,d,min,max; /* source, dest coords --mapping--> min, max coords */ MagickBooleanType fix_bounds = MagickTrue; /* enlarge bounds for VP handling */ s.x=s.y=min.x=max.x=min.y=max.y=0.0; /* keep compiler happy */ /* defines to figure out the bounds of the distorted image */ #define InitalBounds(p) \ { \ /* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \ min.x = max.x = p.x; \ min.y = max.y = p.y; \ } #define ExpandBounds(p) \ { \ /* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \ min.x = MagickMin(min.x,p.x); \ max.x = MagickMax(max.x,p.x); \ min.y = MagickMin(min.y,p.y); \ max.y = MagickMax(max.y,p.y); \ } switch (method) { case AffineDistortion: case RigidAffineDistortion: { double inverse[6]; InvertAffineCoefficients(coeff, inverse); s.x = (double) image->page.x; s.y = (double) image->page.y; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; InitalBounds(d); s.x = (double) 
image->page.x+image->columns; s.y = (double) image->page.y; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; ExpandBounds(d); s.x = (double) image->page.x; s.y = (double) image->page.y+image->rows; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; ExpandBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y+image->rows; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; ExpandBounds(d); break; } case PerspectiveDistortion: { double inverse[8], scale; InvertPerspectiveCoefficients(coeff, inverse); s.x = (double) image->page.x; s.y = (double) image->page.y; scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); InitalBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y; scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); ExpandBounds(d); s.x = (double) image->page.x; s.y = (double) image->page.y+image->rows; scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); ExpandBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y+image->rows; scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); ExpandBounds(d); break; } case ArcDistortion: { double a, ca, sa; /* Forward Map Corners */ a = coeff[0]-coeff[1]/2; ca = cos(a); sa = sin(a); d.x = coeff[2]*ca; d.y = coeff[2]*sa; InitalBounds(d); d.x = (coeff[2]-coeff[3])*ca; 
d.y = (coeff[2]-coeff[3])*sa; ExpandBounds(d); a = coeff[0]+coeff[1]/2; ca = cos(a); sa = sin(a); d.x = coeff[2]*ca; d.y = coeff[2]*sa; ExpandBounds(d); d.x = (coeff[2]-coeff[3])*ca; d.y = (coeff[2]-coeff[3])*sa; ExpandBounds(d); /* Orthogonal points along top of arc */ for( a=(double) (ceil((double) ((coeff[0]-coeff[1]/2.0)/MagickPI2))*MagickPI2); a<(coeff[0]+coeff[1]/2.0); a+=MagickPI2 ) { ca = cos(a); sa = sin(a); d.x = coeff[2]*ca; d.y = coeff[2]*sa; ExpandBounds(d); } /* Convert the angle_to_width and radius_to_height to appropriate scaling factors, to allow faster processing in the mapping function. */ coeff[1] = (double) (Magick2PI*image->columns/coeff[1]); coeff[3] = (double)image->rows/coeff[3]; break; } case PolarDistortion: { if (number_arguments < 2) coeff[2] = coeff[3] = 0.0; min.x = coeff[2]-coeff[0]; max.x = coeff[2]+coeff[0]; min.y = coeff[3]-coeff[0]; max.y = coeff[3]+coeff[0]; /* should be about 1.0 if Rmin = 0 */ coeff[7]=(double) geometry.height/(coeff[0]-coeff[1]); break; } case DePolarDistortion: { /* direct calculation as it needs to tile correctly * for reversibility in a DePolar-Polar cycle */ fix_bounds = MagickFalse; geometry.x = geometry.y = 0; geometry.height = (size_t) ceil(coeff[0]-coeff[1]); geometry.width = (size_t) ceil((coeff[0]-coeff[1])*(coeff[5]-coeff[4])*0.5); /* correct scaling factors relative to new size */ coeff[6]=(coeff[5]-coeff[4])/geometry.width; /* changed width */ coeff[7]=(coeff[0]-coeff[1])/geometry.height; /* should be about 1.0 */ break; } case Cylinder2PlaneDistortion: { /* direct calculation so center of distortion is either a pixel * center, or pixel edge. 
This allows for reversibility of the * distortion */ geometry.x = geometry.y = 0; geometry.width = (size_t) ceil( 2.0*coeff[1]*tan(coeff[0]/2.0) ); geometry.height = (size_t) ceil( 2.0*coeff[3]/cos(coeff[0]/2.0) ); /* correct center of distortion relative to new size */ coeff[4] = (double) geometry.width/2.0; coeff[5] = (double) geometry.height/2.0; fix_bounds = MagickFalse; break; } case Plane2CylinderDistortion: { /* direct calculation center is either pixel center, or pixel edge * so as to allow reversibility of the image distortion */ geometry.x = geometry.y = 0; geometry.width = (size_t) ceil(coeff[0]*coeff[1]); /* FOV * radius */ geometry.height = (size_t) (2*coeff[3]); /* input image height */ /* correct center of distortion relative to new size */ coeff[4] = (double) geometry.width/2.0; coeff[5] = (double) geometry.height/2.0; fix_bounds = MagickFalse; break; } case ShepardsDistortion: case BilinearForwardDistortion: case BilinearReverseDistortion: #if 0 case QuadrilateralDistortion: #endif case PolynomialDistortion: case BarrelDistortion: case BarrelInverseDistortion: default: /* no calculated bestfit available for these distortions */ bestfit = MagickFalse; fix_bounds = MagickFalse; break; } /* Set the output image geometry to calculated 'bestfit'. Yes this tends to 'over do' the file image size, ON PURPOSE! Do not do this for DePolar which needs to be exact for virtual tiling. */ if ( fix_bounds ) { geometry.x = (ssize_t) floor(min.x-0.5); geometry.y = (ssize_t) floor(min.y-0.5); geometry.width=(size_t) ceil(max.x-geometry.x+0.5); geometry.height=(size_t) ceil(max.y-geometry.y+0.5); } } /* end bestfit destination image calculations */ /* The user provided a 'viewport' expert option which may overrides some parts of the current output image geometry. This also overrides its default 'bestfit' setting. 
*/ { const char *artifact=GetImageArtifact(image,"distort:viewport"); viewport_given = MagickFalse; if ( artifact != (const char *) NULL ) { MagickStatusType flags=ParseAbsoluteGeometry(artifact,&geometry); if (flags==NoValue) (void) ThrowMagickException(exception,GetMagickModule(), OptionWarning,"InvalidSetting","'%s' '%s'", "distort:viewport",artifact); else viewport_given = MagickTrue; } } /* Verbose output */ if (IsStringTrue(GetImageArtifact(image,"verbose")) != MagickFalse) { ssize_t i; char image_gen[MagickPathExtent]; const char *lookup; /* Set destination image size and virtual offset */ if ( bestfit || viewport_given ) { (void) FormatLocaleString(image_gen,MagickPathExtent, " -size %.20gx%.20g -page %+.20g%+.20g xc: +insert \\\n", (double) geometry.width,(double) geometry.height,(double) geometry.x, (double) geometry.y); lookup="v.p{xx-v.page.x-0.5,yy-v.page.y-0.5}"; } else { image_gen[0] = '\0'; /* no destination to generate */ lookup = "p{xx-page.x-0.5,yy-page.y-0.5}"; /* simplify lookup */ } switch (method) { case AffineDistortion: case RigidAffineDistortion: { double *inverse; inverse=(double *) AcquireQuantumMemory(6,sizeof(*inverse)); if (inverse == (double *) NULL) { coeff=(double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","%s","DistortImages"); return((Image *) NULL); } InvertAffineCoefficients(coeff, inverse); CoefficientsToAffineArgs(inverse); (void) FormatLocaleFile(stderr, "Affine projection:\n"); (void) FormatLocaleFile(stderr, " -distort AffineProjection \\\n '"); for (i=0; i < 5; i++) (void) FormatLocaleFile(stderr, "%.*g,",GetMagickPrecision(), inverse[i]); (void) FormatLocaleFile(stderr, "%.*g'\n",GetMagickPrecision(), inverse[5]); (void) FormatLocaleFile(stderr, "Equivalent scale, rotation(deg), translation:\n"); (void) FormatLocaleFile(stderr," %.*g,%.*g,%.*g,%.*g\n", GetMagickPrecision(),sqrt(inverse[0]*inverse[0]+ 
inverse[1]*inverse[1]),GetMagickPrecision(), RadiansToDegrees(atan2(inverse[1],inverse[0])), GetMagickPrecision(),inverse[4],GetMagickPrecision(),inverse[5]); inverse=(double *) RelinquishMagickMemory(inverse); (void) FormatLocaleFile(stderr,"Affine distort, FX equivalent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) FormatLocaleFile(stderr," xx=%+.*g*ii %+.*g*jj %+.*g;\n", GetMagickPrecision(),coeff[0],GetMagickPrecision(),coeff[1], GetMagickPrecision(),coeff[2]); (void) FormatLocaleFile(stderr," yy=%+.*g*ii %+.*g*jj %+.*g;\n", GetMagickPrecision(),coeff[3],GetMagickPrecision(),coeff[4], GetMagickPrecision(),coeff[5]); (void) FormatLocaleFile(stderr," %s' \\\n",lookup); break; } case PerspectiveDistortion: { double *inverse; inverse=(double *) AcquireQuantumMemory(8,sizeof(*inverse)); if (inverse == (double *) NULL) { coeff=(double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","%s", "DistortCoefficients"); return((Image *) NULL); } InvertPerspectiveCoefficients(coeff, inverse); (void) FormatLocaleFile(stderr,"Perspective Projection:\n"); (void) FormatLocaleFile(stderr, " -distort PerspectiveProjection \\\n '"); for (i=0; i < 4; i++) (void) FormatLocaleFile(stderr, "%.*g, ",GetMagickPrecision(), inverse[i]); (void) FormatLocaleFile(stderr, "\n "); for ( ; i < 7; i++) (void) FormatLocaleFile(stderr, "%.*g, ",GetMagickPrecision(), inverse[i]); (void) FormatLocaleFile(stderr, "%.*g'\n",GetMagickPrecision(), inverse[7]); inverse=(double *) RelinquishMagickMemory(inverse); (void) FormatLocaleFile(stderr,"Perspective Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr,"%.1024s",image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) FormatLocaleFile(stderr," rr=%+.*g*ii %+.*g*jj + 1;\n", 
GetMagickPrecision(),coeff[6],GetMagickPrecision(),coeff[7]); (void) FormatLocaleFile(stderr, " xx=(%+.*g*ii %+.*g*jj %+.*g)/rr;\n", GetMagickPrecision(),coeff[0],GetMagickPrecision(),coeff[1], GetMagickPrecision(),coeff[2]); (void) FormatLocaleFile(stderr, " yy=(%+.*g*ii %+.*g*jj %+.*g)/rr;\n", GetMagickPrecision(),coeff[3],GetMagickPrecision(),coeff[4], GetMagickPrecision(),coeff[5]); (void) FormatLocaleFile(stderr," rr%s0 ? %s : blue' \\\n", coeff[8] < 0.0 ? "<" : ">", lookup); break; } case BilinearForwardDistortion: { (void) FormatLocaleFile(stderr,"BilinearForward Mapping Equations:\n"); (void) FormatLocaleFile(stderr,"%s", image_gen); (void) FormatLocaleFile(stderr," i = %+lf*x %+lf*y %+lf*x*y %+lf;\n", coeff[0],coeff[1],coeff[2],coeff[3]); (void) FormatLocaleFile(stderr," j = %+lf*x %+lf*y %+lf*x*y %+lf;\n", coeff[4],coeff[5],coeff[6],coeff[7]); #if 0 /* for debugging */ (void) FormatLocaleFile(stderr, " c8 = %+lf c9 = 2*a = %+lf;\n", coeff[8], coeff[9]); #endif (void) FormatLocaleFile(stderr, "BilinearForward Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr,"%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",0.5-coeff[3],0.5- coeff[7]); (void) FormatLocaleFile(stderr," bb=%lf*ii %+lf*jj %+lf;\n", coeff[6], -coeff[2], coeff[8]); /* Handle Special degenerate (non-quadratic) or trapezoidal case */ if (coeff[9] != 0) { (void) FormatLocaleFile(stderr, " rt=bb*bb %+lf*(%lf*ii%+lf*jj);\n",-2*coeff[9],coeff[4], -coeff[0]); (void) FormatLocaleFile(stderr, " yy=( -bb + sqrt(rt) ) / %lf;\n",coeff[9]); } else (void) FormatLocaleFile(stderr," yy=(%lf*ii%+lf*jj)/bb;\n", -coeff[4],coeff[0]); (void) FormatLocaleFile(stderr, " xx=(ii %+lf*yy)/(%lf %+lf*yy);\n",-coeff[1],coeff[0], coeff[2]); if ( coeff[9] != 0 ) (void) FormatLocaleFile(stderr," (rt < 0 ) ? 
red : %s'\n", lookup); else (void) FormatLocaleFile(stderr," %s' \\\n", lookup); break; } case BilinearReverseDistortion: { #if 0 (void) FormatLocaleFile(stderr, "Polynomial Projection Distort:\n"); (void) FormatLocaleFile(stderr, " -distort PolynomialProjection \\\n"); (void) FormatLocaleFile(stderr, " '1.5, %lf, %lf, %lf, %lf,\n", coeff[3], coeff[0], coeff[1], coeff[2]); (void) FormatLocaleFile(stderr, " %lf, %lf, %lf, %lf'\n", coeff[7], coeff[4], coeff[5], coeff[6]); #endif (void) FormatLocaleFile(stderr, "BilinearReverse Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr,"%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",coeff[0],coeff[1], coeff[2], coeff[3]); (void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n",coeff[4],coeff[5], coeff[6], coeff[7]); (void) FormatLocaleFile(stderr," %s' \\\n", lookup); break; } case PolynomialDistortion: { size_t nterms = (size_t) coeff[1]; (void) FormatLocaleFile(stderr, "Polynomial (order %lg, terms %lu), FX Equivelent\n",coeff[0], (unsigned long) nterms); (void) FormatLocaleFile(stderr,"%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) FormatLocaleFile(stderr, " xx ="); for (i=0; i < (ssize_t) nterms; i++) { if ((i != 0) && (i%4 == 0)) (void) FormatLocaleFile(stderr, "\n "); (void) FormatLocaleFile(stderr," %+lf%s",coeff[2+i], poly_basis_str(i)); } (void) FormatLocaleFile(stderr,";\n yy ="); for (i=0; i < (ssize_t) nterms; i++) { if ((i != 0) && (i%4 == 0)) (void) FormatLocaleFile(stderr,"\n "); (void) FormatLocaleFile(stderr," %+lf%s",coeff[2+i+nterms], poly_basis_str(i)); } (void) FormatLocaleFile(stderr,";\n %s' \\\n", lookup); break; } case ArcDistortion: { (void) FormatLocaleFile(stderr,"Arc Distort, Internal Coefficients:\n"); for (i=0; i < 5; i++) (void) FormatLocaleFile(stderr, " c%.20g = %+lf\n",(double) i,coeff[i]); 
(void) FormatLocaleFile(stderr,"Arc Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr,"%s", image_gen); (void) FormatLocaleFile(stderr," -fx 'ii=i+page.x; jj=j+page.y;\n"); (void) FormatLocaleFile(stderr," xx=(atan2(jj,ii)%+lf)/(2*pi);\n", -coeff[0]); (void) FormatLocaleFile(stderr," xx=xx-round(xx);\n"); (void) FormatLocaleFile(stderr," xx=xx*%lf %+lf;\n",coeff[1], coeff[4]); (void) FormatLocaleFile(stderr, " yy=(%lf - hypot(ii,jj)) * %lf;\n",coeff[2],coeff[3]); (void) FormatLocaleFile(stderr," v.p{xx-.5,yy-.5}' \\\n"); break; } case PolarDistortion: { (void) FormatLocaleFile(stderr,"Polar Distort, Internal Coefficents\n"); for (i=0; i < 8; i++) (void) FormatLocaleFile(stderr," c%.20g = %+lf\n",(double) i, coeff[i]); (void) FormatLocaleFile(stderr,"Polar Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr,"%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n",-coeff[2],-coeff[3]); (void) FormatLocaleFile(stderr," xx=(atan2(ii,jj)%+lf)/(2*pi);\n", -(coeff[4]+coeff[5])/2 ); (void) FormatLocaleFile(stderr," xx=xx-round(xx);\n"); (void) FormatLocaleFile(stderr," xx=xx*2*pi*%lf + v.w/2;\n", coeff[6] ); (void) FormatLocaleFile(stderr," yy=(hypot(ii,jj)%+lf)*%lf;\n", -coeff[1],coeff[7] ); (void) FormatLocaleFile(stderr," v.p{xx-.5,yy-.5}' \\\n"); break; } case DePolarDistortion: { (void) FormatLocaleFile(stderr, "DePolar Distort, Internal Coefficents\n"); for (i=0; i < 8; i++) (void) FormatLocaleFile(stderr," c%.20g = %+lf\n",(double) i, coeff[i]); (void) FormatLocaleFile(stderr,"DePolar Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr,"%s", image_gen); (void) FormatLocaleFile(stderr," -fx 'aa=(i+.5)*%lf %+lf;\n", coeff[6],+coeff[4]); (void) FormatLocaleFile(stderr," rr=(j+.5)*%lf %+lf;\n", coeff[7],+coeff[1]); (void) FormatLocaleFile(stderr," xx=rr*sin(aa) %+lf;\n", coeff[2]); (void) FormatLocaleFile(stderr," yy=rr*cos(aa) %+lf;\n", coeff[3]); (void) FormatLocaleFile(stderr," v.p{xx-.5,yy-.5}' \\\n"); 
break; } case Cylinder2PlaneDistortion: { (void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, Internal Coefficents\n"); (void) FormatLocaleFile(stderr," cylinder_radius = %+lf\n",coeff[1]); (void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",-coeff[4], -coeff[5]); (void) FormatLocaleFile(stderr," aa=atan(ii/%+lf);\n",coeff[1]); (void) FormatLocaleFile(stderr," xx=%lf*aa%+lf;\n", coeff[1],coeff[2]); (void) FormatLocaleFile(stderr," yy=jj*cos(aa)%+lf;\n",coeff[3]); (void) FormatLocaleFile(stderr," %s' \\\n", lookup); break; } case Plane2CylinderDistortion: { (void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, Internal Coefficents\n"); (void) FormatLocaleFile(stderr," cylinder_radius = %+lf\n",coeff[1]); (void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr,"%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n",-coeff[4], -coeff[5]); (void) FormatLocaleFile(stderr," ii=ii/%+lf;\n",coeff[1]); (void) FormatLocaleFile(stderr," xx=%lf*tan(ii)%+lf;\n",coeff[1], coeff[2] ); (void) FormatLocaleFile(stderr," yy=jj/cos(ii)%+lf;\n",coeff[3]); (void) FormatLocaleFile(stderr," %s' \\\n", lookup); break; } case BarrelDistortion: case BarrelInverseDistortion: { double xc, yc; /* NOTE: This does the barrel roll in pixel coords not image coords The internal distortion must do it in image coordinates, so that is what the center coeff (8,9) is given in. */ xc=((double)image->columns-1.0)/2.0+image->page.x; yc=((double)image->rows-1.0)/2.0+image->page.y; (void) FormatLocaleFile(stderr, "Barrel%s Distort, FX Equivelent:\n", method == BarrelDistortion ? 
"" : "Inv"); (void) FormatLocaleFile(stderr, "%s", image_gen); if ( fabs(coeff[8]-xc-0.5) < 0.1 && fabs(coeff[9]-yc-0.5) < 0.1 ) (void) FormatLocaleFile(stderr," -fx 'xc=(w-1)/2; yc=(h-1)/2;\n"); else (void) FormatLocaleFile(stderr," -fx 'xc=%lf; yc=%lf;\n",coeff[8]- 0.5,coeff[9]-0.5); (void) FormatLocaleFile(stderr, " ii=i-xc; jj=j-yc; rr=hypot(ii,jj);\n"); (void) FormatLocaleFile(stderr, " ii=ii%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n", method == BarrelDistortion ? "*" : "/",coeff[0],coeff[1],coeff[2], coeff[3]); (void) FormatLocaleFile(stderr, " jj=jj%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n", method == BarrelDistortion ? "*" : "/",coeff[4],coeff[5],coeff[6], coeff[7]); (void) FormatLocaleFile(stderr," v.p{fx*ii+xc,fy*jj+yc}' \\\n"); } default: break; } } /* The user provided a 'scale' expert option will scale the output image size, by the factor given allowing for super-sampling of the distorted image space. Any scaling factors must naturally be halved as a result. */ { const char *artifact; artifact=GetImageArtifact(image,"distort:scale"); output_scaling = 1.0; if (artifact != (const char *) NULL) { output_scaling = fabs(StringToDouble(artifact,(char **) NULL)); geometry.width=(size_t) (output_scaling*geometry.width+0.5); geometry.height=(size_t) (output_scaling*geometry.height+0.5); geometry.x=(ssize_t) (output_scaling*geometry.x+0.5); geometry.y=(ssize_t) (output_scaling*geometry.y+0.5); if ( output_scaling < 0.1 ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s", "-set option:distort:scale" ); return((Image *) NULL); } output_scaling = 1/output_scaling; } } #define ScaleFilter(F,A,B,C,D) \ ScaleResampleFilter( (F), \ output_scaling*(A), output_scaling*(B), \ output_scaling*(C), output_scaling*(D) ) /* Initialize the distort image attributes. 
*/ distort_image=CloneImage(image,geometry.width,geometry.height,MagickTrue, exception); if (distort_image == (Image *) NULL) { coeff=(double *) RelinquishMagickMemory(coeff); return((Image *) NULL); } /* if image is ColorMapped - change it to DirectClass */ if (SetImageStorageClass(distort_image,DirectClass,exception) == MagickFalse) { coeff=(double *) RelinquishMagickMemory(coeff); distort_image=DestroyImage(distort_image); return((Image *) NULL); } if ((IsPixelInfoGray(&distort_image->background_color) == MagickFalse) && (IsGrayColorspace(distort_image->colorspace) != MagickFalse)) (void) SetImageColorspace(distort_image,sRGBColorspace,exception); if (distort_image->background_color.alpha_trait != UndefinedPixelTrait) distort_image->alpha_trait=BlendPixelTrait; distort_image->page.x=geometry.x; distort_image->page.y=geometry.y; ConformPixelInfo(distort_image,&distort_image->matte_color,&invalid, exception); { /* ----- MAIN CODE ----- Sample the source image to each pixel in the distort image. 
*/ CacheView *distort_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo zero; ResampleFilter **magick_restrict resample_filter; ssize_t j; status=MagickTrue; progress=0; GetPixelInfo(distort_image,&zero); resample_filter=AcquireResampleFilterThreadSet(image, UndefinedVirtualPixelMethod,MagickFalse,exception); distort_view=AcquireAuthenticCacheView(distort_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,distort_image,distort_image->rows,1) #endif for (j=0; j < (ssize_t) distort_image->rows; j++) { const int id = GetOpenMPThreadId(); double validity; /* how mathematically valid is this the mapping */ MagickBooleanType sync; PixelInfo pixel; /* pixel color to assign to distorted image */ PointInfo d, s; /* transform destination image x,y to source image x,y */ ssize_t i; Quantum *magick_restrict q; q=QueueCacheViewAuthenticPixels(distort_view,0,j,distort_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; /* Define constant scaling vectors for Affine Distortions Other methods are either variable, or use interpolated lookup */ switch (method) { case AffineDistortion: case RigidAffineDistortion: ScaleFilter( resample_filter[id], coeff[0], coeff[1], coeff[3], coeff[4] ); break; default: break; } /* Initialize default pixel validity * negative: pixel is invalid output 'matte_color' * 0.0 to 1.0: antialiased, mix with resample output * 1.0 or greater: use resampled output. 
*/ validity = 1.0; for (i=0; i < (ssize_t) distort_image->columns; i++) { /* map pixel coordinate to distortion space coordinate */ d.x = (double) (geometry.x+i+0.5)*output_scaling; d.y = (double) (geometry.y+j+0.5)*output_scaling; s = d; /* default is a no-op mapping */ switch (method) { case AffineDistortion: case RigidAffineDistortion: { s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]; s.y=coeff[3]*d.x+coeff[4]*d.y+coeff[5]; /* Affine partial derivitives are constant -- set above */ break; } case PerspectiveDistortion: { double p,q,r,abs_r,abs_c6,abs_c7,scale; /* perspective is a ratio of affines */ p=coeff[0]*d.x+coeff[1]*d.y+coeff[2]; q=coeff[3]*d.x+coeff[4]*d.y+coeff[5]; r=coeff[6]*d.x+coeff[7]*d.y+1.0; /* Pixel Validity -- is it a 'sky' or 'ground' pixel */ validity = (r*coeff[8] < 0.0) ? 0.0 : 1.0; /* Determine horizon anti-alias blending */ abs_r = fabs(r)*2; abs_c6 = fabs(coeff[6]); abs_c7 = fabs(coeff[7]); if ( abs_c6 > abs_c7 ) { if ( abs_r < abs_c6*output_scaling ) validity = 0.5 - coeff[8]*r/(coeff[6]*output_scaling); } else if ( abs_r < abs_c7*output_scaling ) validity = 0.5 - coeff[8]*r/(coeff[7]*output_scaling); /* Perspective Sampling Point (if valid) */ if ( validity > 0.0 ) { /* divide by r affine, for perspective scaling */ scale = 1.0/r; s.x = p*scale; s.y = q*scale; /* Perspective Partial Derivatives or Scaling Vectors */ scale *= scale; ScaleFilter( resample_filter[id], (r*coeff[0] - p*coeff[6])*scale, (r*coeff[1] - p*coeff[7])*scale, (r*coeff[3] - q*coeff[6])*scale, (r*coeff[4] - q*coeff[7])*scale ); } break; } case BilinearReverseDistortion: { /* Reversed Mapped is just a simple polynomial */ s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]*d.x*d.y+coeff[3]; s.y=coeff[4]*d.x+coeff[5]*d.y +coeff[6]*d.x*d.y+coeff[7]; /* Bilinear partial derivitives of scaling vectors */ ScaleFilter( resample_filter[id], coeff[0] + coeff[2]*d.y, coeff[1] + coeff[2]*d.x, coeff[4] + coeff[6]*d.y, coeff[5] + coeff[6]*d.x ); break; } case BilinearForwardDistortion: { /* Forward 
mapped needs reversed polynomial equations * which unfortunatally requires a square root! */ double b,c; d.x -= coeff[3]; d.y -= coeff[7]; b = coeff[6]*d.x - coeff[2]*d.y + coeff[8]; c = coeff[4]*d.x - coeff[0]*d.y; validity = 1.0; /* Handle Special degenerate (non-quadratic) case * Currently without horizon anti-alising */ if ( fabs(coeff[9]) < MagickEpsilon ) s.y = -c/b; else { c = b*b - 2*coeff[9]*c; if ( c < 0.0 ) validity = 0.0; else s.y = ( -b + sqrt(c) )/coeff[9]; } if ( validity > 0.0 ) s.x = ( d.x - coeff[1]*s.y) / ( coeff[0] + coeff[2]*s.y ); /* NOTE: the sign of the square root should be -ve for parts where the source image becomes 'flipped' or 'mirrored'. FUTURE: Horizon handling FUTURE: Scaling factors or Deritives (how?) */ break; } #if 0 case BilinearDistortion: /* Bilinear mapping of any Quadrilateral to any Quadrilateral */ /* UNDER DEVELOPMENT */ break; #endif case PolynomialDistortion: { /* multi-ordered polynomial */ ssize_t k; ssize_t nterms=(ssize_t)coeff[1]; PointInfo du,dv; /* the du,dv vectors from unit dx,dy -- derivatives */ s.x=s.y=du.x=du.y=dv.x=dv.y=0.0; for(k=0; k < nterms; k++) { s.x += poly_basis_fn(k,d.x,d.y)*coeff[2+k]; du.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k]; du.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k]; s.y += poly_basis_fn(k,d.x,d.y)*coeff[2+k+nterms]; dv.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k+nterms]; dv.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k+nterms]; } ScaleFilter( resample_filter[id], du.x,du.y,dv.x,dv.y ); break; } case ArcDistortion: { /* what is the angle and radius in the destination image */ s.x = (double) ((atan2(d.y,d.x) - coeff[0])/Magick2PI); s.x -= MagickRound(s.x); /* angle */ s.y = hypot(d.x,d.y); /* radius */ /* Arc Distortion Partial Scaling Vectors Are derived by mapping the perpendicular unit vectors dR and dA*R*2PI rather than trying to map dx and dy The results is a very simple orthogonal aligned ellipse. 
*/ if ( s.y > MagickEpsilon ) ScaleFilter( resample_filter[id], (double) (coeff[1]/(Magick2PI*s.y)), 0, 0, coeff[3] ); else ScaleFilter( resample_filter[id], distort_image->columns*2, 0, 0, coeff[3] ); /* now scale the angle and radius for source image lookup point */ s.x = s.x*coeff[1] + coeff[4] + image->page.x +0.5; s.y = (coeff[2] - s.y) * coeff[3] + image->page.y; break; } case PolarDistortion: { /* 2D Cartesain to Polar View */ d.x -= coeff[2]; d.y -= coeff[3]; s.x = atan2(d.x,d.y) - (coeff[4]+coeff[5])/2; s.x /= Magick2PI; s.x -= MagickRound(s.x); s.x *= Magick2PI; /* angle - relative to centerline */ s.y = hypot(d.x,d.y); /* radius */ /* Polar Scaling vectors are based on mapping dR and dA vectors This results in very simple orthogonal scaling vectors */ if ( s.y > MagickEpsilon ) ScaleFilter( resample_filter[id], (double) (coeff[6]/(Magick2PI*s.y)), 0, 0, coeff[7] ); else ScaleFilter( resample_filter[id], distort_image->columns*2, 0, 0, coeff[7] ); /* now finish mapping radius/angle to source x,y coords */ s.x = s.x*coeff[6] + (double)image->columns/2.0 + image->page.x; s.y = (s.y-coeff[1])*coeff[7] + image->page.y; break; } case DePolarDistortion: { /* @D Polar to Carteasain */ /* ignore all destination virtual offsets */ d.x = ((double)i+0.5)*output_scaling*coeff[6]+coeff[4]; d.y = ((double)j+0.5)*output_scaling*coeff[7]+coeff[1]; s.x = d.y*sin(d.x) + coeff[2]; s.y = d.y*cos(d.x) + coeff[3]; /* derivatives are usless - better to use SuperSampling */ break; } case Cylinder2PlaneDistortion: { /* 3D Cylinder to Tangential Plane */ double ax, cx; /* relative to center of distortion */ d.x -= coeff[4]; d.y -= coeff[5]; d.x /= coeff[1]; /* x' = x/r */ ax=atan(d.x); /* aa = atan(x/r) = u/r */ cx=cos(ax); /* cx = cos(atan(x/r)) = 1/sqrt(x^2+u^2) */ s.x = coeff[1]*ax; /* u = r*atan(x/r) */ s.y = d.y*cx; /* v = y*cos(u/r) */ /* derivatives... 
(see personnal notes) */ ScaleFilter( resample_filter[id], 1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y ); #if 0 if ( i == 0 && j == 0 ) { fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y); fprintf(stderr, "phi = %lf\n", (double)(ax * 180.0/MagickPI) ); fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n", 1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y ); fflush(stderr); } #endif /* add center of distortion in source */ s.x += coeff[2]; s.y += coeff[3]; break; } case Plane2CylinderDistortion: { /* 3D Cylinder to Tangential Plane */ /* relative to center of distortion */ d.x -= coeff[4]; d.y -= coeff[5]; /* is pixel valid - horizon of a infinite Virtual-Pixel Plane * (see Anthony Thyssen's personal note) */ validity = (double) (coeff[1]*MagickPI2 - fabs(d.x))/output_scaling + 0.5; if ( validity > 0.0 ) { double cx,tx; d.x /= coeff[1]; /* x'= x/r */ cx = 1/cos(d.x); /* cx = 1/cos(x/r) */ tx = tan(d.x); /* tx = tan(x/r) */ s.x = coeff[1]*tx; /* u = r * tan(x/r) */ s.y = d.y*cx; /* v = y / cos(x/r) */ /* derivatives... 
(see Anthony Thyssen's personal notes) */ ScaleFilter( resample_filter[id], cx*cx, 0.0, s.y*cx/coeff[1], cx ); #if 0 /*if ( i == 0 && j == 0 )*/ if ( d.x == 0.5 && d.y == 0.5 ) { fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y); fprintf(stderr, "radius = %lf phi = %lf validity = %lf\n", coeff[1], (double)(d.x * 180.0/MagickPI), validity ); fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n", cx*cx, 0.0, s.y*cx/coeff[1], cx); fflush(stderr); } #endif } /* add center of distortion in source */ s.x += coeff[2]; s.y += coeff[3]; break; } case BarrelDistortion: case BarrelInverseDistortion: { /* Lens Barrel Distionion Correction */ double r,fx,fy,gx,gy; /* Radial Polynomial Distortion (de-normalized) */ d.x -= coeff[8]; d.y -= coeff[9]; r = sqrt(d.x*d.x+d.y*d.y); if ( r > MagickEpsilon ) { fx = ((coeff[0]*r + coeff[1])*r + coeff[2])*r + coeff[3]; fy = ((coeff[4]*r + coeff[5])*r + coeff[6])*r + coeff[7]; gx = ((3*coeff[0]*r + 2*coeff[1])*r + coeff[2])/r; gy = ((3*coeff[4]*r + 2*coeff[5])*r + coeff[6])/r; /* adjust functions and scaling for 'inverse' form */ if ( method == BarrelInverseDistortion ) { fx = 1/fx; fy = 1/fy; gx *= -fx*fx; gy *= -fy*fy; } /* Set the source pixel to lookup and EWA derivative vectors */ s.x = d.x*fx + coeff[8]; s.y = d.y*fy + coeff[9]; ScaleFilter( resample_filter[id], gx*d.x*d.x + fx, gx*d.x*d.y, gy*d.x*d.y, gy*d.y*d.y + fy ); } else { /* Special handling to avoid divide by zero when r==0 ** ** The source and destination pixels match in this case ** which was set at the top of the loop using s = d; ** otherwise... 
s.x=coeff[8]; s.y=coeff[9]; */ if ( method == BarrelDistortion ) ScaleFilter( resample_filter[id], coeff[3], 0, 0, coeff[7] ); else /* method == BarrelInverseDistortion */ /* FUTURE, trap for D==0 causing division by zero */ ScaleFilter( resample_filter[id], 1.0/coeff[3], 0, 0, 1.0/coeff[7] ); } break; } case ShepardsDistortion: { /* Shepards Method, or Inverse Weighted Distance for displacement around the destination image control points The input arguments are the coefficents to the function. This is more of a 'displacement' function rather than an absolute distortion function. Note: We can not determine derivatives using shepards method so only a point sample interpolatation can be used. */ size_t i; double denominator; denominator = s.x = s.y = 0; for(i=0; i<number_arguments; i+=4) { double weight = ((double)d.x-arguments[i+2])*((double)d.x-arguments[i+2]) + ((double)d.y-arguments[i+3])*((double)d.y-arguments[i+3]); weight = pow(weight,coeff[0]); /* shepards power factor */ weight = ( weight < 1.0 ) ? 
1.0 : 1.0/weight; s.x += (arguments[ i ]-arguments[i+2])*weight; s.y += (arguments[i+1]-arguments[i+3])*weight; denominator += weight; } s.x /= denominator; s.y /= denominator; s.x += d.x; /* make it as relative displacement */ s.y += d.y; break; } default: break; /* use the default no-op given above */ } /* map virtual canvas location back to real image coordinate */ if ( bestfit && method != ArcDistortion ) { s.x -= image->page.x; s.y -= image->page.y; } s.x -= 0.5; s.y -= 0.5; if ( validity <= 0.0 ) { /* result of distortion is an invalid pixel - don't resample */ SetPixelViaPixelInfo(distort_image,&invalid,q); } else { /* resample the source image to find its correct color */ (void) ResamplePixelColor(resample_filter[id],s.x,s.y,&pixel, exception); /* if validity between 0.0 and 1.0 mix result with invalid pixel */ if ( validity < 1.0 ) { /* Do a blend of sample color and invalid pixel */ /* should this be a 'Blend', or an 'Over' compose */ CompositePixelInfoBlend(&pixel,validity,&invalid,(1.0-validity), &pixel); } SetPixelViaPixelInfo(distort_image,&pixel,q); } q+=GetPixelChannels(distort_image); } sync=SyncCacheViewAuthenticPixels(distort_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,DistortImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } distort_view=DestroyCacheView(distort_view); resample_filter=DestroyResampleFilterThreadSet(resample_filter); if (status == MagickFalse) distort_image=DestroyImage(distort_image); } /* Arc does not return an offset unless 'bestfit' is in effect And the user has not provided an overriding 'viewport'. 
*/
  /*
    Arc does not return an offset unless 'bestfit' is in effect and the user
    has not provided an overriding 'viewport'.
  */
  if ( method == ArcDistortion && !bestfit && !viewport_given ) {
    distort_image->page.x = 0;
    distort_image->page.y = 0;
  }
  coeff=(double *) RelinquishMagickMemory(coeff);
  return(distort_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     R o t a t e I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RotateImage() creates a new image that is a rotated copy of an existing
%  one.  Positive angles rotate counter-clockwise (right-hand rule), while
%  negative angles rotate clockwise.  Rotated images are usually larger than
%  the originals and have 'empty' triangular corners.  Empty triangles left
%  over from shearing the image are filled with the background color defined
%  by member 'background_color' of the image.  RotateImage allocates the
%  memory necessary for the new Image structure and returns a pointer to the
%  new image.
%
%  The format of the RotateImage method is:
%
%      Image *RotateImage(const Image *image,const double degrees,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o degrees: Specifies the number of degrees to rotate the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotateImage(const Image *image,const double degrees,
  ExceptionInfo *exception)
{
  Image
    *distort_image,
    *rotate_image;

  double
    angle;

  PointInfo
    shear;

  size_t
    rotations;

  /*
    Adjust rotation angle: fold the requested angle into a residual in
    (-45,45] plus a number of exact quarter turns ('rotations').
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  angle=fmod(degrees,360.0);
  while (angle < -45.0)
    angle+=360.0;
  for (rotations=0; angle > 45.0; rotations++)
    angle-=90.0;
  rotations%=4;
  /*
    Shear factors for the residual rotation; if both are negligible the
    rotation is an exact multiple of 90 degrees and the lossless integral
    rotation path is taken instead of a resampled distortion.
  */
  shear.x=(-tan((double) DegreesToRadians(angle)/2.0));
  shear.y=sin((double) DegreesToRadians(angle));
  if ((fabs(shear.x) < MagickEpsilon) && (fabs(shear.y) < MagickEpsilon))
    return(IntegralRotateImage(image,rotations,exception));
  /*
    General case: clone the image and let DistortImage() perform a
    Scale-Rotate-Translate distortion by the full 'degrees' angle, with
    bestfit enabled so the result is enlarged to hold the rotated image.
  */
  distort_image=CloneImage(image,0,0,MagickTrue,exception);
  if (distort_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageVirtualPixelMethod(distort_image,BackgroundVirtualPixelMethod,
    exception);
  rotate_image=DistortImage(distort_image,ScaleRotateTranslateDistortion,1,
    &degrees,MagickTrue,exception);
  distort_image=DestroyImage(distort_image);
  return(rotate_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S p a r s e C o l o r I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SparseColorImage(), given a set of coordinates, interpolates the colors
%  found at those coordinates, across the whole image, using various methods.
%
%  The format of the SparseColorImage() method is:
%
%      Image *SparseColorImage(const Image *image,
%        const SparseColorMethod method,const size_t number_arguments,
%        const double *arguments,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image to be filled in.
%
%    o method: the method to fill in the gradient between the control points.
%
%      The methods used for SparseColor() are often similar to methods
%      used for DistortImage(), and even share the same code for determination
%      of the function coefficients, though with more dimensions (or resulting
%      values).
%
%    o number_arguments: the number of arguments given.
%
%    o arguments: array of floating point arguments for this method--
%        x,y,color_values-- with color_values given as normalized values.
%
%    o exception: return any errors or warnings in this structure
%
*/
MagickExport Image *SparseColorImage(const Image *image,
  const SparseColorMethod method,const size_t number_arguments,
  const double *arguments,ExceptionInfo *exception)
{
#define SparseColorTag "Distort/SparseColor"

  SparseColorMethod
    sparse_method;

  double
    *coeff;

  Image
    *sparse_image;

  size_t
    number_colors;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Determine number of color values needed per control point: one value for
    each channel that is active ('update' trait) on this image.
  */
  number_colors=0;
  if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) number_colors++;
  if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) number_colors++;
  if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) number_colors++;
  if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
      (image->colorspace == CMYKColorspace)) number_colors++;
  if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
      (image->alpha_trait != UndefinedPixelTrait)) number_colors++;

  /*
    Convert input arguments into mapping coefficients; in this case we are
    mapping (distorting) colors, rather than coordinates.
  */
  { DistortMethod
      distort_method;

    distort_method=(DistortMethod) method;
    if ( distort_method >= SentinelDistortion )
      distort_method = ShepardsDistortion; /* Pretend to be Shepards */
    coeff = GenerateCoefficients(image, &distort_method, number_arguments,
      arguments, number_colors, exception);
    if ( coeff == (double *) NULL )
      return((Image *) NULL);
    /*
      Note some Distort Methods may fall back to other simpler methods.
      Currently the only fallback of concern is Bilinear to Affine
      (Barycentric), which is also a sparse color method.  This also ensures
      correct two and one color Barycentric handling.
    */
    sparse_method = (SparseColorMethod) distort_method;
    if ( distort_method == ShepardsDistortion )
      sparse_method = method;   /* return non-distort methods to normal */
    if ( sparse_method == InverseColorInterpolate )
      coeff[0]=0.5;             /* sqrt() the squared distance for inverse */
  }

  /* Verbose output: print an approximate -fx equivalent of the mapping */
  if (IsStringTrue(GetImageArtifact(image,"verbose")) != MagickFalse) {

    switch (sparse_method) {
      case BarycentricColorInterpolate:
      {
        ssize_t x=0;
        (void) FormatLocaleFile(stderr, "Barycentric Sparse Color:\n");
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr,
            " -channel R -fx '%+lf*i %+lf*j %+lf' \\\n",
            coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr,
            " -channel G -fx '%+lf*i %+lf*j %+lf' \\\n",
            coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr,
            " -channel B -fx '%+lf*i %+lf*j %+lf' \\\n",
            coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
            (image->colorspace == CMYKColorspace))
          (void) FormatLocaleFile(stderr,
            " -channel K -fx '%+lf*i %+lf*j %+lf' \\\n",
            coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
            (image->alpha_trait != UndefinedPixelTrait))
          (void) FormatLocaleFile(stderr,
            " -channel A -fx '%+lf*i %+lf*j %+lf' \\\n",
            coeff[x], coeff[x+1], coeff[x+2]),x+=3;
        break;
      }
      case BilinearColorInterpolate:
      {
        ssize_t x=0;
        (void) FormatLocaleFile(stderr, "Bilinear Sparse Color\n");
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr,
            " -channel R -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
            coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4;
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr,
            " -channel G -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
            coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4;
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          (void) FormatLocaleFile(stderr,
            " -channel B -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
            coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4;
        if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
            (image->colorspace == CMYKColorspace))
          (void) FormatLocaleFile(stderr,
            " -channel K -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
            coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4;
        if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
            (image->alpha_trait != UndefinedPixelTrait))
          (void) FormatLocaleFile(stderr,
            " -channel A -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n",
            coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4;
        break;
      }
      default:
        /* sparse color method is too complex for FX emulation */
        break;
    }
  }

  /* Generate new image for generated interpolated gradient.
   * ASIDE: Actually we could have just replaced the colors of the original
   * image, but IM Core policy, is if storage class could change then clone
   * the image.
   */
  sparse_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sparse_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sparse_image,DirectClass,exception) == MagickFalse)
    { /* if image is ColorMapped - change it to DirectClass */
      sparse_image=DestroyImage(sparse_image);
      return((Image *) NULL);
    }
  { /* ----- MAIN CODE -----
       Evaluate the chosen interpolation at every pixel (i,j), one image row
       per OpenMP iteration.
    */
    CacheView
      *sparse_view;

    MagickBooleanType
      status;

    MagickOffsetType
      progress;

    ssize_t
      j;

    status=MagickTrue;
    progress=0;
    sparse_view=AcquireAuthenticCacheView(sparse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) shared(progress,status) \
      magick_number_threads(image,sparse_image,sparse_image->rows,1)
#endif
    for (j=0; j < (ssize_t) sparse_image->rows; j++)
    {
      MagickBooleanType
        sync;

      PixelInfo
        pixel;    /* pixel to assign to distorted image */

      ssize_t
        i;

      Quantum
        *magick_restrict q;

      q=GetCacheViewAuthenticPixels(sparse_view,0,j,sparse_image->columns,
        1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      GetPixelInfo(sparse_image,&pixel);
      for (i=0; i < (ssize_t) image->columns; i++)
      {
        /* start from the existing pixel; only 'update' channels are replaced */
        GetPixelInfoPixel(image,q,&pixel);
        switch (sparse_method)
        {
          case BarycentricColorInterpolate:
          {
            /* affine (planar) fit: channel = a*i + b*j + c */
            ssize_t x=0;
            if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
              pixel.red = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3;
            if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
              pixel.green = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3;
            if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
              pixel.blue = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3;
            if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                (image->colorspace == CMYKColorspace))
              pixel.black = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3;
            if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                (image->alpha_trait != UndefinedPixelTrait))
              pixel.alpha = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3;
            break;
          }
          case BilinearColorInterpolate:
          {
            /* bilinear fit: channel = a*i + b*j + c*i*j + d */
            ssize_t x=0;
            if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
              pixel.red = coeff[x]*i + coeff[x+1]*j +
                coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
              pixel.green = coeff[x]*i + coeff[x+1]*j +
                coeff[x+2]*i*j + coeff[x+3], x+=4;
            if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
              pixel.blue = coeff[x]*i + coeff[x+1]*j +
                coeff[x+2]*i*j + coeff[x+3], x+=4;
            if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                (image->colorspace == CMYKColorspace))
              pixel.black = coeff[x]*i + coeff[x+1]*j +
                coeff[x+2]*i*j + coeff[x+3], x+=4;
            if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                (image->alpha_trait != UndefinedPixelTrait))
              pixel.alpha = coeff[x]*i + coeff[x+1]*j +
                coeff[x+2]*i*j + coeff[x+3], x+=4;
            break;
          }
          case InverseColorInterpolate:
          case ShepardsColorInterpolate:
          { /* Inverse (Squared) Distance weights average (IDW) */
            size_t
              k;
            double
              denominator;

            if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
              pixel.red=0.0;
            if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
              pixel.green=0.0;
            if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
              pixel.blue=0.0;
            if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                (image->colorspace == CMYKColorspace))
              pixel.black=0.0;
            if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                (image->alpha_trait != UndefinedPixelTrait))
              pixel.alpha=0.0;
            denominator = 0.0;
            for(k=0; k<number_arguments; k+=2+number_colors) {
              ssize_t x=(ssize_t) k+2;
              double weight =
                  ((double)i-arguments[ k ])*((double)i-arguments[ k ])
                + ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
              weight = pow(weight,coeff[0]); /* inverse of power factor */
              /* clamp to avoid division by zero at a control point */
              weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight;
              if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
                pixel.red += arguments[x++]*weight;
              if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
                pixel.green += arguments[x++]*weight;
              if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
                pixel.blue += arguments[x++]*weight;
              if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                  (image->colorspace == CMYKColorspace))
                pixel.black += arguments[x++]*weight;
              if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                  (image->alpha_trait != UndefinedPixelTrait))
                pixel.alpha += arguments[x++]*weight;
              denominator += weight;
            }
            if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
              pixel.red/=denominator;
            if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
              pixel.green/=denominator;
            if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
              pixel.blue/=denominator;
            if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                (image->colorspace == CMYKColorspace))
              pixel.black/=denominator;
            if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                (image->alpha_trait != UndefinedPixelTrait))
              pixel.alpha/=denominator;
            break;
          }
          case ManhattanColorInterpolate:
          {
            size_t
              k;
            double
              minimum = MagickMaximumValue;

            /*
              Just use the closest control point you can find!
              (distance measured along the axes only - taxicab metric)
            */
            for(k=0; k<number_arguments; k+=2+number_colors) {
              double distance =
                  fabs((double)i-arguments[ k ])
                + fabs((double)j-arguments[k+1]);
              if ( distance < minimum ) {
                ssize_t x=(ssize_t) k+2;
                if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
                  pixel.red=arguments[x++];
                if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
                  pixel.green=arguments[x++];
                if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
                  pixel.blue=arguments[x++];
                if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                    (image->colorspace == CMYKColorspace))
                  pixel.black=arguments[x++];
                if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                    (image->alpha_trait != UndefinedPixelTrait))
                  pixel.alpha=arguments[x++];
                minimum = distance;
              }
            }
            break;
          }
          case VoronoiColorInterpolate:
          default:
          {
            size_t
              k;
            double
              minimum = MagickMaximumValue;

            /*
              Just use the closest control point you can find!
              (squared Euclidean distance; no sqrt needed for comparison)
            */
            for (k=0; k<number_arguments; k+=2+number_colors) {
              double distance =
                  ((double)i-arguments[ k ])*((double)i-arguments[ k ])
                + ((double)j-arguments[k+1])*((double)j-arguments[k+1]);
              if ( distance < minimum ) {
                ssize_t x=(ssize_t) k+2;
                if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
                  pixel.red=arguments[x++];
                if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
                  pixel.green=arguments[x++];
                if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
                  pixel.blue=arguments[x++];
                if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
                    (image->colorspace == CMYKColorspace))
                  pixel.black=arguments[x++];
                if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
                    (image->alpha_trait != UndefinedPixelTrait))
                  pixel.alpha=arguments[x++];
                minimum = distance;
              }
            }
            break;
          }
        }
        /*
          Clamp the normalized result to quantum range and store it into the
          cloned (sparse) image.
        */
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          pixel.red=(MagickRealType) ClampPixel(QuantumRange*pixel.red);
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          pixel.green=(MagickRealType) ClampPixel(QuantumRange*pixel.green);
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          pixel.blue=(MagickRealType) ClampPixel(QuantumRange*pixel.blue);
        if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
            (image->colorspace == CMYKColorspace))
          pixel.black=(MagickRealType) ClampPixel(QuantumRange*pixel.black);
        if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
            (image->alpha_trait != UndefinedPixelTrait))
          pixel.alpha=(MagickRealType) ClampPixel(QuantumRange*pixel.alpha);
        SetPixelViaPixelInfo(sparse_image,&pixel,q);
        q+=GetPixelChannels(sparse_image);
      }
      sync=SyncCacheViewAuthenticPixels(sparse_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      if (image->progress_monitor != (MagickProgressMonitor) NULL)
        {
          MagickBooleanType
            proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp atomic
#endif
          progress++;
          proceed=SetImageProgress(image,SparseColorTag,progress,image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
    sparse_view=DestroyCacheView(sparse_view);
    if (status == MagickFalse)
      sparse_image=DestroyImage(sparse_image);
  }
  coeff = (double *) RelinquishMagickMemory(coeff);
  return(sparse_image);
}
/* ==== file: GB_binop__first_uint64.c ==== */
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__first_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_08__first_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_02__first_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_04__first_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__first_uint64)
// A*D function (colscale):         GB (_AxD__first_uint64)
// D*A function (rowscale):         GB (_DxB__first_uint64)
// C+=B function (dense accum):     GB (_Cdense_accumB__first_uint64)
// C+=b function (dense accum):     GB (_Cdense_accumb__first_uint64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__first_uint64)
// C=scalar+B                       GB ((none))
// C=scalar+B'                      GB ((none))
// C=A+scalar                       GB ((none))
// C=A'+scalar                      GB ((none))

// C type:   uint64_t
// A type:   uint64_t
// A pattern? 0
// B type:   uint64_t
// B pattern? 1

// BinaryOp: cij = aij
// (FIRST: the result is the value of A alone, so B is pattern-only)

#define GB_ATYPE \
    uint64_t

#define GB_BTYPE \
    uint64_t

#define GB_CTYPE \
    uint64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint64_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    ;

// true if values of B are not used
#define GB_B_IS_PATTERN \
    1 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    uint64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = x ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_FIRST || GxB_NO_UINT64 || GxB_NO_FIRST_UINT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__first_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__first_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // FIRST has no accum form; the template body is compiled out
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__first_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // FIRST has no accum form; the template body is compiled out
    #if 0
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__first_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__first_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__first_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint64_t alpha_scalar ;
    uint64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // alpha/beta are only defined for eWiseUnion
        alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__first_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__first_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__first_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__first_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        ;
        ;
        Cx [p] = x ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = GBX (Ax, p, false) ;
        Cx [p] = aij ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (x, A'):  transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    ; ; \
    Cx [pC] = x ; \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint64_t
}

#endif

//------------------------------------------------------------------------------
// C = op (A', y):  transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint64_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = aij ; \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
/* ==== file: mttkrp_omp.c ==== */
/*
    This file is part of ParTI!.

    ParTI! is free software: you can redistribute it and/or modify
    it under the terms of the GNU Lesser General Public License as
    published by the Free Software Foundation, either version 3 of
    the License, or (at your option) any later version.

    ParTI! is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU Lesser General Public
    License along with ParTI!.
    If not, see <http://www.gnu.org/licenses/>.
*/

#include <HiParTI.h>
#include "sptensor.h"

/* Forward declarations of the specialized 3rd-order kernels (defined below /
   elsewhere in this file). */
int ptiOmpMTTKRP_3D(ptiSparseTensor const * const X,
    ptiMatrix * mats[],     // mats[nmodes] as temporary space.
    ptiIndex const mats_order[],    // Correspond to the mode order of X.
    ptiIndex const mode,
    const int tk);
int ptiOmpMTTKRP_3D_Reduce(ptiSparseTensor const * const X,
    ptiMatrix * mats[],     // mats[nmodes] as temporary space.
    ptiMatrix * copy_mats[],    // temporary matrices for reduction
    ptiIndex const mats_order[],    // Correspond to the mode order of X.
    ptiIndex const mode,
    const int tk);
int ptiOmpMTTKRP_3D_Lock(ptiSparseTensor const * const X,
    ptiMatrix * mats[],     // mats[nmodes] as temporary space.
    ptiIndex const mats_order[],    // Correspond to the mode order of X.
    ptiIndex const mode,
    const int tk,
    ptiMutexPool * lock_pool);

/**
 * OpenMP parallelized Matriced sparse tensor times a sequence of dense matrix
 * Khatri-Rao products (MTTKRP) on a specified mode
 * @param[out] mats[nmodes]    the result of MTTKRP, a dense matrix, with size
 * ndims[mode] * R
 * @param[in]  X    the sparse tensor input X
 * @param[in]  mats    (N+1) dense matrices, with mats[nmodes] as temporary
 * @param[in]  mats_order    the order of the Khatri-Rao products
 * @param[in]  mode     the mode on which the MTTKRP is performed
 *
 * This function supports arbitrary-order sparse tensors with Khatri-Rao
 * products of dense factor matrices; the output is the updated dense matrix
 * for the "mode".  In this version, a large internal scratch array
 * (nnz * stride values) is used to maximize parallelism. (To be optimized)
 */
int ptiOmpMTTKRP_Init(ptiSparseTensor const * const X,
    ptiMatrix * mats[],     // mats[nmodes] as temporary space.
    ptiIndex const mats_order[],    // Correspond to the mode order of X.
    ptiIndex const mode)
{
    ptiIndex const nmodes = X->nmodes;
    ptiNnzIndex const nnz = X->nnz;
    ptiIndex const * const ndims = X->ndims;
    ptiValue const * const vals = X->values.data;
    ptiIndex const stride = mats[0]->stride;
    ptiValueVector scratch;  // Temporary array: one row of R values per nonzero
    ptiNewValueVector(&scratch, nnz * stride, nnz * stride);
    ptiConstantValueVector(&scratch, 0);

    /* Check the mats: all factors share R columns and match the tensor dims. */
    for(ptiIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    ptiIndex const tmpI = mats[mode]->nrows;
    ptiIndex const R = mats[mode]->ncols;
    ptiIndex const * const mode_ind = X->inds[mode].data;
    ptiMatrix * const M = mats[nmodes];
    ptiValue * const mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(ptiValue));

    /* Phase 1 (parallel): per-nonzero Hadamard product of the factor rows
       selected by the nonzero's indices, in the order given by mats_order. */
    #pragma omp parallel for
    for(ptiNnzIndex x=0; x<nnz; ++x) {
        ptiIndex times_mat_index = mats_order[1];
        ptiMatrix * times_mat = mats[times_mat_index];
        ptiIndex * times_inds = X->inds[times_mat_index].data;
        ptiIndex tmp_i = times_inds[x];
        ptiValue const entry = vals[x];
        for(ptiIndex r=0; r<R; ++r) {
            scratch.data[x * stride + r] = entry * times_mat->values[tmp_i * stride + r];
        }
        for(ptiIndex i=2; i<nmodes; ++i) {
            times_mat_index = mats_order[i];
            times_mat = mats[times_mat_index];
            times_inds = X->inds[times_mat_index].data;
            tmp_i = times_inds[x];
            for(ptiIndex r=0; r<R; ++r) {
                scratch.data[x * stride + r] *= times_mat->values[tmp_i * stride + r];
            }
        }
    }

    /* Phase 2 (serial): scatter-accumulate scratch rows into the output rows
       indexed by the target mode (serial to avoid write conflicts). */
    for(ptiNnzIndex x=0; x<nnz; ++x) {
        ptiIndex const mode_i = mode_ind[x];
        for(ptiIndex r=0; r<R; ++r) {
            mvals[mode_i * stride + r] += scratch.data[x * stride + r];
        }
    }

    ptiFreeValueVector(&scratch);

    return 0;
}

/*
 * General-order OpenMP MTTKRP using per-element atomic accumulation.
 * Dispatches to the specialized 3-D kernel when nmodes == 3.
 * NOTE(review): each nonzero iteration allocates and frees an R-length
 * scratch vector inside the parallel loop -- presumably a known cost
 * ("to be optimized"); hoisting a per-thread buffer would avoid it.
 */
int ptiOmpMTTKRP(ptiSparseTensor const * const X,
    ptiMatrix * mats[],     // mats[nmodes] as temporary space.
    ptiIndex const mats_order[],    // Correspond to the mode order of X.
    ptiIndex const mode,
    const int tk)
{
    ptiIndex const nmodes = X->nmodes;

    if(nmodes == 3) {
        ptiAssert(ptiOmpMTTKRP_3D(X, mats, mats_order, mode, tk) == 0);
        return 0;
    }

    ptiNnzIndex const nnz = X->nnz;
    ptiIndex const * const ndims = X->ndims;
    ptiValue const * const vals = X->values.data;
    ptiIndex const stride = mats[0]->stride;

    /* Check the mats. */
    for(ptiIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    ptiIndex const tmpI = mats[mode]->nrows;
    ptiIndex const R = mats[mode]->ncols;
    ptiIndex const * const mode_ind = X->inds[mode].data;
    ptiValue * const restrict mvals = mats[nmodes]->values;
    memset(mvals, 0, tmpI*stride*sizeof(ptiValue));

    #pragma omp parallel for schedule(static) num_threads(tk)
    for(ptiNnzIndex x=0; x<nnz; ++x) {
        ptiValueVector scratch; // Temporary array
        ptiNewValueVector(&scratch, R, R);
        ptiConstantValueVector(&scratch, 0);

        /* Hadamard product of the factor rows for this nonzero. */
        ptiIndex times_mat_index = mats_order[1];
        ptiMatrix * times_mat = mats[times_mat_index];
        ptiIndex * times_inds = X->inds[times_mat_index].data;
        ptiIndex tmp_i = times_inds[x];
        ptiValue const entry = vals[x];
        #pragma omp simd
        for(ptiIndex r=0; r<R; ++r) {
            scratch.data[r] = entry * times_mat->values[tmp_i * stride + r];
        }
        for(ptiIndex i=2; i<nmodes; ++i) {
            times_mat_index = mats_order[i];
            times_mat = mats[times_mat_index];
            times_inds = X->inds[times_mat_index].data;
            tmp_i = times_inds[x];
            #pragma omp simd
            for(ptiIndex r=0; r<R; ++r) {
                scratch.data[r] *= times_mat->values[tmp_i * stride + r];
            }
        }

        /* Accumulate into the output row; atomics guard concurrent writes to
           the same row from different nonzeros. */
        ptiIndex const mode_i = mode_ind[x];
        ptiValue * const restrict mvals_row = mvals + mode_i * stride;
        for(ptiIndex r=0; r<R; ++r) {
            #pragma omp atomic update
            mvals_row[r] += scratch.data[r];
        }
        ptiFreeValueVector(&scratch);
    }   // End loop nnzs

    return 0;
}


int ptiOmpMTTKRP_3D(ptiSparseTensor const * const X,
    ptiMatrix * mats[],     // mats[nmodes] as temporary space.
    ptiIndex const mats_order[],    // Correspond to the mode order of X.
ptiIndex const mode, const int tk) { ptiIndex const nmodes = X->nmodes; ptiNnzIndex const nnz = X->nnz; ptiIndex const * const ndims = X->ndims; ptiValue const * const restrict vals = X->values.data; ptiIndex const stride = mats[0]->stride; /* Check the mats. */ ptiAssert(nmodes ==3); for(ptiIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } ptiIndex const tmpI = mats[mode]->nrows; ptiIndex const R = mats[mode]->ncols; ptiIndex const * const restrict mode_ind = X->inds[mode].data; ptiValue * const restrict mvals = mats[nmodes]->values; memset(mvals, 0, tmpI*stride*sizeof(ptiValue)); ptiIndex times_mat_index_1 = mats_order[1]; ptiMatrix * restrict times_mat_1 = mats[times_mat_index_1]; ptiIndex * restrict times_inds_1 = X->inds[times_mat_index_1].data; ptiIndex times_mat_index_2 = mats_order[2]; ptiMatrix * restrict times_mat_2 = mats[times_mat_index_2]; ptiIndex * restrict times_inds_2 = X->inds[times_mat_index_2].data; #pragma omp parallel for schedule(static) num_threads(tk) for(ptiNnzIndex x=0; x<nnz; ++x) { ptiIndex mode_i = mode_ind[x]; ptiValue * const restrict mvals_row = mvals + mode_i * stride; ptiIndex tmp_i_1 = times_inds_1[x]; ptiIndex tmp_i_2 = times_inds_2[x]; ptiValue entry = vals[x]; for(ptiIndex r=0; r<R; ++r) { #pragma omp atomic update mvals_row[r] += entry * times_mat_1->values[tmp_i_1 * stride + r] * times_mat_2->values[tmp_i_2 * stride + r]; } } return 0; } int ptiOmpMTTKRP_Lock(ptiSparseTensor const * const X, ptiMatrix * mats[], // mats[nmodes] as temporary space. ptiIndex const mats_order[], // Correspond to the mode order of X. 
ptiIndex const mode, const int tk, ptiMutexPool * lock_pool) { ptiIndex const nmodes = X->nmodes; if(nmodes == 3) { ptiAssert(ptiOmpMTTKRP_3D_Lock(X, mats, mats_order, mode, tk, lock_pool) == 0); return 0; } ptiNnzIndex const nnz = X->nnz; ptiIndex const * const ndims = X->ndims; ptiValue const * const vals = X->values.data; ptiIndex const stride = mats[0]->stride; /* Check the mats. */ for(ptiIndex i=0; i<nmodes; ++i) { if(mats[i]->ncols != mats[nmodes]->ncols) { pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols"); } if(mats[i]->nrows != ndims[i]) { pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU SpTns MTTKRP", "mats[i]->nrows != ndims[i]"); } } ptiIndex const tmpI = mats[mode]->nrows; ptiIndex const R = mats[mode]->ncols; ptiIndex const * const mode_ind = X->inds[mode].data; ptiValue * const mvals = mats[nmodes]->values; memset(mvals, 0, tmpI*stride*sizeof(ptiValue)); #pragma omp parallel for schedule(static) num_threads(tk) for(ptiNnzIndex x=0; x<nnz; ++x) { ptiValueVector scratch; // Temporary array ptiNewValueVector(&scratch, R, R); ptiConstantValueVector(&scratch, 0); ptiIndex times_mat_index = mats_order[1]; ptiMatrix * times_mat = mats[times_mat_index]; ptiIndex * times_inds = X->inds[times_mat_index].data; ptiIndex tmp_i = times_inds[x]; ptiValue const entry = vals[x]; for(ptiIndex r=0; r<R; ++r) { scratch.data[r] = entry * times_mat->values[tmp_i * stride + r]; } for(ptiIndex i=2; i<nmodes; ++i) { times_mat_index = mats_order[i]; times_mat = mats[times_mat_index]; times_inds = X->inds[times_mat_index].data; tmp_i = times_inds[x]; for(ptiIndex r=0; r<R; ++r) { scratch.data[r] *= times_mat->values[tmp_i * stride + r]; } } ptiIndex const mode_i = mode_ind[x]; ptiValue * const restrict mvals_row = mvals + mode_i * stride; ptiMutexSetLock(lock_pool, mode_i); for(ptiIndex r=0; r<R; ++r) { mvals_row[r] += scratch.data[r]; } ptiMutexUnsetLock(lock_pool, mode_i); ptiFreeValueVector(&scratch); } // End loop nnzs return 0; } 
/**
 * OpenMP MTTKRP specialized for third-order tensors, mutex-pool variant.
 * Each nonzero's contribution (entry * row1 * row2) is staged in a small
 * scratch buffer and then added to the output row while holding that row's
 * lock from lock_pool.
 *
 * Same parameter contract as ptiOmpMTTKRP_Lock; requires nmodes == 3.
 */
int ptiOmpMTTKRP_3D_Lock(ptiSparseTensor const * const X,
    ptiMatrix * mats[],
    ptiIndex const mats_order[],
    ptiIndex const mode,
    const int tk,
    ptiMutexPool * lock_pool)
{
    ptiIndex const nmodes = X->nmodes;
    ptiNnzIndex const nnz = X->nnz;
    ptiIndex const * const ndims = X->ndims;
    ptiValue const * const restrict vals = X->values.data;
    ptiIndex const stride = mats[0]->stride;

    /* Check the mats. */
    ptiAssert(nmodes ==3);
    for(ptiIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    ptiIndex const tmpI = mats[mode]->nrows;
    ptiIndex const R = mats[mode]->ncols;
    ptiIndex const * const restrict mode_ind = X->inds[mode].data;
    ptiValue * const restrict mvals = mats[nmodes]->values;
    memset(mvals, 0, tmpI*stride*sizeof(ptiValue));

    /* The two non-target factors are fixed for a 3rd-order tensor; hoist them. */
    ptiIndex times_mat_index_1 = mats_order[1];
    ptiMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    ptiIndex * restrict times_inds_1 = X->inds[times_mat_index_1].data;
    ptiIndex times_mat_index_2 = mats_order[2];
    ptiMatrix * restrict times_mat_2 = mats[times_mat_index_2];
    ptiIndex * restrict times_inds_2 = X->inds[times_mat_index_2].data;

    #pragma omp parallel for schedule(static) num_threads(tk)
    for(ptiNnzIndex x=0; x<nnz; ++x) {
        /* NOTE(review): R-length scratch is allocated/freed per nonzero; a
         * per-thread buffer (or accumulating directly under the lock) would
         * avoid nnz heap allocations. */
        ptiValueVector scratch;  // Temporary array: staged contribution for one nonzero.
        ptiNewValueVector(&scratch, R, R);
        ptiConstantValueVector(&scratch, 0);

        ptiIndex mode_i = mode_ind[x];
        ptiValue * const restrict mvals_row = mvals + mode_i * stride;
        ptiIndex tmp_i_1 = times_inds_1[x];
        ptiIndex tmp_i_2 = times_inds_2[x];
        ptiValue entry = vals[x];
        for(ptiIndex r=0; r<R; ++r) {
            scratch.data[r] = entry * times_mat_1->values[tmp_i_1 * stride + r] * times_mat_2->values[tmp_i_2 * stride + r];
        }

        /* Row update guarded by the row's mutex from the pool. */
        ptiMutexSetLock(lock_pool, mode_i);
        for(ptiIndex r=0; r<R; ++r) {
            mvals_row[r] += scratch.data[r];
        }
        ptiMutexUnsetLock(lock_pool, mode_i);
        ptiFreeValueVector(&scratch);
    }

    return 0;
}

/**
 * OpenMP MTTKRP, privatized-reduction variant: each thread accumulates into
 * its own full-size copy matrix (copy_mats[tid], no synchronization needed),
 * then all copies are summed into mats[nmodes] in a second parallel pass.
 * Dispatches to ptiOmpMTTKRP_3D_Reduce for third-order tensors.
 *
 * @param[in] copy_mats  tk matrices of at least ndims[mode] x R; zeroed here
 *                       and used as per-thread partial results
 */
int ptiOmpMTTKRP_Reduce(ptiSparseTensor const * const X,
    ptiMatrix * mats[],
    ptiMatrix * copy_mats[],
    ptiIndex const mats_order[],
    ptiIndex const mode,
    const int tk)
{
    ptiIndex const nmodes = X->nmodes;

    if(nmodes == 3) {
        ptiAssert(ptiOmpMTTKRP_3D_Reduce(X, mats, copy_mats, mats_order, mode, tk) == 0);
        return 0;
    }

    ptiNnzIndex const nnz = X->nnz;
    ptiIndex const * const ndims = X->ndims;
    ptiValue const * const vals = X->values.data;
    ptiIndex const stride = mats[0]->stride;

    /* Check the mats. */
    for(ptiIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    ptiIndex const tmpI = mats[mode]->nrows;
    ptiIndex const R = mats[mode]->ncols;
    ptiIndex const * const mode_ind = X->inds[mode].data;
    ptiMatrix * const M = mats[nmodes];
    ptiValue * const mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(ptiValue));
    /* Zero every per-thread partial-result matrix. */
    for(int t=0; t<tk; ++t) {
        memset(copy_mats[t]->values, 0, ndims[mode]*stride*sizeof(*(copy_mats[t]->values)));
    }

    #pragma omp parallel for schedule(static) num_threads(tk)
    for(ptiNnzIndex x=0; x<nnz; ++x) {
        int tid = omp_get_thread_num();  // Selects this thread's private copy matrix.
        /* NOTE(review): per-nonzero scratch allocation, as in ptiOmpMTTKRP;
         * could be hoisted per thread. */
        ptiValueVector scratch;  // Temporary array: R-length accumulator.
        ptiNewValueVector(&scratch, R, R);
        ptiConstantValueVector(&scratch, 0);

        ptiIndex times_mat_index = mats_order[1];
        ptiMatrix * times_mat = mats[times_mat_index];
        ptiIndex * times_inds = X->inds[times_mat_index].data;
        ptiIndex tmp_i = times_inds[x];
        ptiValue const entry = vals[x];
        #pragma omp simd
        for(ptiIndex r=0; r<R; ++r) {
            scratch.data[r] = entry * times_mat->values[tmp_i * stride + r];
        }
        for(ptiIndex i=2; i<nmodes; ++i) {
            times_mat_index = mats_order[i];
            times_mat = mats[times_mat_index];
            times_inds = X->inds[times_mat_index].data;
            tmp_i = times_inds[x];
            #pragma omp simd
            for(ptiIndex r=0; r<R; ++r) {
                scratch.data[r] *= times_mat->values[tmp_i * stride + r];
            }
        }

        ptiIndex const mode_i = mode_ind[x];
        /* Accumulate into this thread's private matrix: no locks or atomics. */
        #pragma omp simd
        for(ptiIndex r=0; r<R; ++r) {
            copy_mats[tid]->values[mode_i * stride + r] += scratch.data[r];
        }
        ptiFreeValueVector(&scratch);
    }   // End loop nnzs

    /* Reduction: sum the tk partial matrices into the output, parallel over rows. */
    #pragma omp parallel for schedule(static) num_threads(tk)
    for(ptiIndex i=0; i<ndims[mode]; ++i) {
        for(int t=0; t<tk; ++t) {
            #pragma omp simd
            for(ptiIndex r=0; r<R; ++r) {
                mvals[i * stride + r] += copy_mats[t]->values[i * stride + r];
            }
        }
    }

    return 0;
}

/**
 * OpenMP MTTKRP specialized for third-order tensors, privatized-reduction
 * variant: contributions go straight into each thread's copy matrix (no
 * scratch buffer, no synchronization), followed by a parallel row-wise
 * reduction into mats[nmodes].
 *
 * Same parameter contract as ptiOmpMTTKRP_Reduce; requires nmodes == 3.
 */
int ptiOmpMTTKRP_3D_Reduce(ptiSparseTensor const * const X,
    ptiMatrix * mats[],
    ptiMatrix * copy_mats[],
    ptiIndex const mats_order[],
    ptiIndex const mode,
    const int tk)
{
    ptiIndex const nmodes = X->nmodes;
    ptiNnzIndex const nnz = X->nnz;
    ptiIndex const * const ndims = X->ndims;
    ptiValue const * const restrict vals = X->values.data;
    ptiIndex const stride = mats[0]->stride;

    /* Check the mats. */
    ptiAssert(nmodes ==3);
    for(ptiIndex i=0; i<nmodes; ++i) {
        if(mats[i]->ncols != mats[nmodes]->ncols) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU SpTns MTTKRP", "mats[i]->cols != mats[nmodes]->ncols");
        }
        if(mats[i]->nrows != ndims[i]) {
            pti_CheckError(PTIERR_SHAPE_MISMATCH, "CPU SpTns MTTKRP", "mats[i]->nrows != ndims[i]");
        }
    }

    ptiIndex const tmpI = mats[mode]->nrows;
    ptiIndex const R = mats[mode]->ncols;
    ptiIndex const * const restrict mode_ind = X->inds[mode].data;
    ptiMatrix * const restrict M = mats[nmodes];
    ptiValue * const restrict mvals = M->values;
    memset(mvals, 0, tmpI*stride*sizeof(ptiValue));
    /* Zero every per-thread partial-result matrix. */
    for(int t=0; t<tk; ++t) {
        memset(copy_mats[t]->values, 0, ndims[mode]*stride*sizeof(*(copy_mats[t]->values)));
    }

    /* The two non-target factors are fixed for a 3rd-order tensor; hoist them. */
    ptiIndex times_mat_index_1 = mats_order[1];
    ptiMatrix * restrict times_mat_1 = mats[times_mat_index_1];
    ptiIndex * restrict times_inds_1 = X->inds[times_mat_index_1].data;
    ptiIndex times_mat_index_2 = mats_order[2];
    ptiMatrix * restrict times_mat_2 = mats[times_mat_index_2];
    ptiIndex * restrict times_inds_2 = X->inds[times_mat_index_2].data;

    #pragma omp parallel for schedule(static) num_threads(tk)
    for(ptiNnzIndex x=0; x<nnz; ++x) {
        int tid = omp_get_thread_num();  // Selects this thread's private copy matrix.
        ptiIndex mode_i = mode_ind[x];
        ptiIndex tmp_i_1 = times_inds_1[x];
        ptiIndex tmp_i_2 = times_inds_2[x];
        ptiValue entry = vals[x];

        /* Accumulate into this thread's private matrix: no locks or atomics. */
        #pragma omp simd
        for(ptiIndex r=0; r<R; ++r) {
            copy_mats[tid]->values[mode_i * stride + r] += entry * times_mat_1->values[tmp_i_1 * stride + r] * times_mat_2->values[tmp_i_2 * stride + r];
        }
    }

    /* Reduction: sum the tk partial matrices into the output, parallel over rows. */
    #pragma omp parallel for schedule(static) num_threads(tk)
    for(ptiIndex i=0; i<ndims[mode]; ++i) {
        for(int t=0; t<tk; ++t) {
            #pragma omp simd
            for(ptiIndex r=0; r<R; ++r) {
                mvals[i * stride + r] += copy_mats[t]->values[i * stride + r];
            }
        }
    }

    return 0;
}
/* ==== begin concatenated file: sp-wrongSVE.c (preprocessed NAS SP benchmark) ==== */
typedef signed char __int8_t; typedef unsigned char __uint8_t; typedef short __int16_t; typedef unsigned short __uint16_t; typedef int __int32_t; typedef unsigned int __uint32_t; typedef long long __int64_t; typedef unsigned long long __uint64_t; typedef long __darwin_intptr_t; typedef unsigned int __darwin_natural_t; typedef int __darwin_ct_rune_t; union stUn_imopVarPre0 { char __mbstate8[128]; long long _mbstateL; } ; typedef union stUn_imopVarPre0 __mbstate_t; typedef __mbstate_t __darwin_mbstate_t; typedef long int __darwin_ptrdiff_t; typedef long unsigned int __darwin_size_t; typedef __builtin_va_list __darwin_va_list; typedef int __darwin_wchar_t; typedef __darwin_wchar_t __darwin_rune_t; typedef int __darwin_wint_t; typedef unsigned long __darwin_clock_t; typedef __uint32_t __darwin_socklen_t; typedef long __darwin_ssize_t; typedef long __darwin_time_t; typedef __int64_t __darwin_blkcnt_t; typedef __int32_t __darwin_blksize_t; typedef __int32_t __darwin_dev_t; typedef unsigned int __darwin_fsblkcnt_t; typedef unsigned int __darwin_fsfilcnt_t; typedef __uint32_t __darwin_gid_t; typedef __uint32_t __darwin_id_t; typedef __uint64_t __darwin_ino64_t; typedef __darwin_ino64_t __darwin_ino_t; typedef __darwin_natural_t __darwin_mach_port_name_t; typedef __darwin_mach_port_name_t __darwin_mach_port_t; typedef __uint16_t __darwin_mode_t; typedef __int64_t __darwin_off_t; typedef __int32_t __darwin_pid_t; typedef __uint32_t __darwin_sigset_t; typedef __int32_t __darwin_suseconds_t; typedef __uint32_t __darwin_uid_t; typedef __uint32_t __darwin_useconds_t; typedef unsigned char __darwin_uuid_t[16]; typedef char __darwin_uuid_string_t[37]; struct __darwin_pthread_handler_rec { void ( *__routine )(void *); void *__arg; struct __darwin_pthread_handler_rec *__next; } ; struct _opaque_pthread_attr_t { long __sig; char __opaque[56]; } ; struct _opaque_pthread_cond_t { long __sig; char __opaque[40]; } ; struct _opaque_pthread_condattr_t { long __sig; char __opaque[8]; } ; 
struct _opaque_pthread_mutex_t { long __sig; char __opaque[56]; } ; struct _opaque_pthread_mutexattr_t { long __sig; char __opaque[8]; } ; struct _opaque_pthread_once_t { long __sig; char __opaque[8]; } ; struct _opaque_pthread_rwlock_t { long __sig; char __opaque[192]; } ; struct _opaque_pthread_rwlockattr_t { long __sig; char __opaque[16]; } ; struct _opaque_pthread_t { long __sig; struct __darwin_pthread_handler_rec *__cleanup_stack; char __opaque[8176]; } ; typedef struct _opaque_pthread_attr_t __darwin_pthread_attr_t; typedef struct _opaque_pthread_cond_t __darwin_pthread_cond_t; typedef struct _opaque_pthread_condattr_t __darwin_pthread_condattr_t; typedef unsigned long __darwin_pthread_key_t; typedef struct _opaque_pthread_mutex_t __darwin_pthread_mutex_t; typedef struct _opaque_pthread_mutexattr_t __darwin_pthread_mutexattr_t; typedef struct _opaque_pthread_once_t __darwin_pthread_once_t; typedef struct _opaque_pthread_rwlock_t __darwin_pthread_rwlock_t; typedef struct _opaque_pthread_rwlockattr_t __darwin_pthread_rwlockattr_t; typedef struct _opaque_pthread_t *__darwin_pthread_t; typedef int __darwin_nl_item; typedef int __darwin_wctrans_t; typedef __uint32_t __darwin_wctype_t; typedef __darwin_va_list va_list; typedef __darwin_size_t size_t; typedef __darwin_off_t fpos_t; struct __sbuf { unsigned char *_base; int _size; } ; struct __sFILEX ; struct __sFILE { unsigned char *_p; int _r; int _w; short _flags; short _file; struct __sbuf _bf; int _lbfsize; void *_cookie; int ( *_close )(void *); int ( *_read )(void *, char * , int ); fpos_t ( *_seek )(void *, fpos_t , int ); int ( *_write )(void *, const char * , int ); struct __sbuf _ub; struct __sFILEX *_extra; int _ur; unsigned char _ubuf[3]; unsigned char _nbuf[1]; struct __sbuf _lb; int _blksize; fpos_t _offset; } ; typedef struct __sFILE FILE; int fclose(FILE *); int fgetc(FILE *); FILE *fopen(const char *restrict __filename, const char *restrict __mode); int fscanf(FILE *restrict , const char *restrict 
, ...); int printf(const char *restrict , ...); typedef __darwin_off_t off_t; typedef __darwin_ssize_t ssize_t; enum enum_imopVarPre1 { P_ALL, P_PID , P_PGID } ; typedef enum enum_imopVarPre1 idtype_t; typedef __darwin_pid_t pid_t; typedef __darwin_id_t id_t; typedef int sig_atomic_t; struct __darwin_i386_thread_state { unsigned int __eax; unsigned int __ebx; unsigned int __ecx; unsigned int __edx; unsigned int __edi; unsigned int __esi; unsigned int __ebp; unsigned int __esp; unsigned int __ss; unsigned int __eflags; unsigned int __eip; unsigned int __cs; unsigned int __ds; unsigned int __es; unsigned int __fs; unsigned int __gs; } ; struct __darwin_fp_control { unsigned short __invalid: 1, __denorm: 1 , __zdiv: 1 , __ovrfl: 1 , __undfl: 1 , __precis: 1 , :2 , __pc: 2 , __rc: 2 , :1 , :3; } ; typedef struct __darwin_fp_control __darwin_fp_control_t; struct __darwin_fp_status { unsigned short __invalid: 1, __denorm: 1 , __zdiv: 1 , __ovrfl: 1 , __undfl: 1 , __precis: 1 , __stkflt: 1 , __errsumm: 1 , __c0: 1 , __c1: 1 , __c2: 1 , __tos: 3 , __c3: 1 , __busy: 1; } ; typedef struct __darwin_fp_status __darwin_fp_status_t; struct __darwin_mmst_reg { char __mmst_reg[10]; char __mmst_rsrv[6]; } ; struct __darwin_xmm_reg { char __xmm_reg[16]; } ; struct __darwin_i386_float_state { int __fpu_reserved[2]; struct __darwin_fp_control __fpu_fcw; struct __darwin_fp_status __fpu_fsw; __uint8_t __fpu_ftw; __uint8_t __fpu_rsrv1; __uint16_t __fpu_fop; __uint32_t __fpu_ip; __uint16_t __fpu_cs; __uint16_t __fpu_rsrv2; __uint32_t __fpu_dp; __uint16_t __fpu_ds; __uint16_t __fpu_rsrv3; __uint32_t __fpu_mxcsr; __uint32_t __fpu_mxcsrmask; struct __darwin_mmst_reg __fpu_stmm0; struct __darwin_mmst_reg __fpu_stmm1; struct __darwin_mmst_reg __fpu_stmm2; struct __darwin_mmst_reg __fpu_stmm3; struct __darwin_mmst_reg __fpu_stmm4; struct __darwin_mmst_reg __fpu_stmm5; struct __darwin_mmst_reg __fpu_stmm6; struct __darwin_mmst_reg __fpu_stmm7; struct __darwin_xmm_reg __fpu_xmm0; struct 
__darwin_xmm_reg __fpu_xmm1; struct __darwin_xmm_reg __fpu_xmm2; struct __darwin_xmm_reg __fpu_xmm3; struct __darwin_xmm_reg __fpu_xmm4; struct __darwin_xmm_reg __fpu_xmm5; struct __darwin_xmm_reg __fpu_xmm6; struct __darwin_xmm_reg __fpu_xmm7; char __fpu_rsrv4[14 * 16]; int __fpu_reserved1; } ; struct __darwin_i386_avx_state { int __fpu_reserved[2]; struct __darwin_fp_control __fpu_fcw; struct __darwin_fp_status __fpu_fsw; __uint8_t __fpu_ftw; __uint8_t __fpu_rsrv1; __uint16_t __fpu_fop; __uint32_t __fpu_ip; __uint16_t __fpu_cs; __uint16_t __fpu_rsrv2; __uint32_t __fpu_dp; __uint16_t __fpu_ds; __uint16_t __fpu_rsrv3; __uint32_t __fpu_mxcsr; __uint32_t __fpu_mxcsrmask; struct __darwin_mmst_reg __fpu_stmm0; struct __darwin_mmst_reg __fpu_stmm1; struct __darwin_mmst_reg __fpu_stmm2; struct __darwin_mmst_reg __fpu_stmm3; struct __darwin_mmst_reg __fpu_stmm4; struct __darwin_mmst_reg __fpu_stmm5; struct __darwin_mmst_reg __fpu_stmm6; struct __darwin_mmst_reg __fpu_stmm7; struct __darwin_xmm_reg __fpu_xmm0; struct __darwin_xmm_reg __fpu_xmm1; struct __darwin_xmm_reg __fpu_xmm2; struct __darwin_xmm_reg __fpu_xmm3; struct __darwin_xmm_reg __fpu_xmm4; struct __darwin_xmm_reg __fpu_xmm5; struct __darwin_xmm_reg __fpu_xmm6; struct __darwin_xmm_reg __fpu_xmm7; char __fpu_rsrv4[14 * 16]; int __fpu_reserved1; char __avx_reserved1[64]; struct __darwin_xmm_reg __fpu_ymmh0; struct __darwin_xmm_reg __fpu_ymmh1; struct __darwin_xmm_reg __fpu_ymmh2; struct __darwin_xmm_reg __fpu_ymmh3; struct __darwin_xmm_reg __fpu_ymmh4; struct __darwin_xmm_reg __fpu_ymmh5; struct __darwin_xmm_reg __fpu_ymmh6; struct __darwin_xmm_reg __fpu_ymmh7; } ; struct __darwin_i386_exception_state { __uint16_t __trapno; __uint16_t __cpu; __uint32_t __err; __uint32_t __faultvaddr; } ; struct __darwin_x86_debug_state32 { unsigned int __dr0; unsigned int __dr1; unsigned int __dr2; unsigned int __dr3; unsigned int __dr4; unsigned int __dr5; unsigned int __dr6; unsigned int __dr7; } ; struct 
__darwin_x86_thread_state64 { __uint64_t __rax; __uint64_t __rbx; __uint64_t __rcx; __uint64_t __rdx; __uint64_t __rdi; __uint64_t __rsi; __uint64_t __rbp; __uint64_t __rsp; __uint64_t __r8; __uint64_t __r9; __uint64_t __r10; __uint64_t __r11; __uint64_t __r12; __uint64_t __r13; __uint64_t __r14; __uint64_t __r15; __uint64_t __rip; __uint64_t __rflags; __uint64_t __cs; __uint64_t __fs; __uint64_t __gs; } ; struct __darwin_x86_float_state64 { int __fpu_reserved[2]; struct __darwin_fp_control __fpu_fcw; struct __darwin_fp_status __fpu_fsw; __uint8_t __fpu_ftw; __uint8_t __fpu_rsrv1; __uint16_t __fpu_fop; __uint32_t __fpu_ip; __uint16_t __fpu_cs; __uint16_t __fpu_rsrv2; __uint32_t __fpu_dp; __uint16_t __fpu_ds; __uint16_t __fpu_rsrv3; __uint32_t __fpu_mxcsr; __uint32_t __fpu_mxcsrmask; struct __darwin_mmst_reg __fpu_stmm0; struct __darwin_mmst_reg __fpu_stmm1; struct __darwin_mmst_reg __fpu_stmm2; struct __darwin_mmst_reg __fpu_stmm3; struct __darwin_mmst_reg __fpu_stmm4; struct __darwin_mmst_reg __fpu_stmm5; struct __darwin_mmst_reg __fpu_stmm6; struct __darwin_mmst_reg __fpu_stmm7; struct __darwin_xmm_reg __fpu_xmm0; struct __darwin_xmm_reg __fpu_xmm1; struct __darwin_xmm_reg __fpu_xmm2; struct __darwin_xmm_reg __fpu_xmm3; struct __darwin_xmm_reg __fpu_xmm4; struct __darwin_xmm_reg __fpu_xmm5; struct __darwin_xmm_reg __fpu_xmm6; struct __darwin_xmm_reg __fpu_xmm7; struct __darwin_xmm_reg __fpu_xmm8; struct __darwin_xmm_reg __fpu_xmm9; struct __darwin_xmm_reg __fpu_xmm10; struct __darwin_xmm_reg __fpu_xmm11; struct __darwin_xmm_reg __fpu_xmm12; struct __darwin_xmm_reg __fpu_xmm13; struct __darwin_xmm_reg __fpu_xmm14; struct __darwin_xmm_reg __fpu_xmm15; char __fpu_rsrv4[6 * 16]; int __fpu_reserved1; } ; struct __darwin_x86_avx_state64 { int __fpu_reserved[2]; struct __darwin_fp_control __fpu_fcw; struct __darwin_fp_status __fpu_fsw; __uint8_t __fpu_ftw; __uint8_t __fpu_rsrv1; __uint16_t __fpu_fop; __uint32_t __fpu_ip; __uint16_t __fpu_cs; __uint16_t __fpu_rsrv2; 
__uint32_t __fpu_dp; __uint16_t __fpu_ds; __uint16_t __fpu_rsrv3; __uint32_t __fpu_mxcsr; __uint32_t __fpu_mxcsrmask; struct __darwin_mmst_reg __fpu_stmm0; struct __darwin_mmst_reg __fpu_stmm1; struct __darwin_mmst_reg __fpu_stmm2; struct __darwin_mmst_reg __fpu_stmm3; struct __darwin_mmst_reg __fpu_stmm4; struct __darwin_mmst_reg __fpu_stmm5; struct __darwin_mmst_reg __fpu_stmm6; struct __darwin_mmst_reg __fpu_stmm7; struct __darwin_xmm_reg __fpu_xmm0; struct __darwin_xmm_reg __fpu_xmm1; struct __darwin_xmm_reg __fpu_xmm2; struct __darwin_xmm_reg __fpu_xmm3; struct __darwin_xmm_reg __fpu_xmm4; struct __darwin_xmm_reg __fpu_xmm5; struct __darwin_xmm_reg __fpu_xmm6; struct __darwin_xmm_reg __fpu_xmm7; struct __darwin_xmm_reg __fpu_xmm8; struct __darwin_xmm_reg __fpu_xmm9; struct __darwin_xmm_reg __fpu_xmm10; struct __darwin_xmm_reg __fpu_xmm11; struct __darwin_xmm_reg __fpu_xmm12; struct __darwin_xmm_reg __fpu_xmm13; struct __darwin_xmm_reg __fpu_xmm14; struct __darwin_xmm_reg __fpu_xmm15; char __fpu_rsrv4[6 * 16]; int __fpu_reserved1; char __avx_reserved1[64]; struct __darwin_xmm_reg __fpu_ymmh0; struct __darwin_xmm_reg __fpu_ymmh1; struct __darwin_xmm_reg __fpu_ymmh2; struct __darwin_xmm_reg __fpu_ymmh3; struct __darwin_xmm_reg __fpu_ymmh4; struct __darwin_xmm_reg __fpu_ymmh5; struct __darwin_xmm_reg __fpu_ymmh6; struct __darwin_xmm_reg __fpu_ymmh7; struct __darwin_xmm_reg __fpu_ymmh8; struct __darwin_xmm_reg __fpu_ymmh9; struct __darwin_xmm_reg __fpu_ymmh10; struct __darwin_xmm_reg __fpu_ymmh11; struct __darwin_xmm_reg __fpu_ymmh12; struct __darwin_xmm_reg __fpu_ymmh13; struct __darwin_xmm_reg __fpu_ymmh14; struct __darwin_xmm_reg __fpu_ymmh15; } ; struct __darwin_x86_exception_state64 { __uint16_t __trapno; __uint16_t __cpu; __uint32_t __err; __uint64_t __faultvaddr; } ; struct __darwin_x86_debug_state64 { __uint64_t __dr0; __uint64_t __dr1; __uint64_t __dr2; __uint64_t __dr3; __uint64_t __dr4; __uint64_t __dr5; __uint64_t __dr6; __uint64_t __dr7; } ; struct 
__darwin_mcontext32 { struct __darwin_i386_exception_state __es; struct __darwin_i386_thread_state __ss; struct __darwin_i386_float_state __fs; } ; struct __darwin_mcontext_avx32 { struct __darwin_i386_exception_state __es; struct __darwin_i386_thread_state __ss; struct __darwin_i386_avx_state __fs; } ; struct __darwin_mcontext64 { struct __darwin_x86_exception_state64 __es; struct __darwin_x86_thread_state64 __ss; struct __darwin_x86_float_state64 __fs; } ; struct __darwin_mcontext_avx64 { struct __darwin_x86_exception_state64 __es; struct __darwin_x86_thread_state64 __ss; struct __darwin_x86_avx_state64 __fs; } ; typedef struct __darwin_mcontext64 *mcontext_t; typedef __darwin_pthread_attr_t pthread_attr_t; struct __darwin_sigaltstack { void *ss_sp; __darwin_size_t ss_size; int ss_flags; } ; typedef struct __darwin_sigaltstack stack_t; struct __darwin_ucontext { int uc_onstack; __darwin_sigset_t uc_sigmask; struct __darwin_sigaltstack uc_stack; struct __darwin_ucontext *uc_link; __darwin_size_t uc_mcsize; struct __darwin_mcontext64 *uc_mcontext; } ; typedef struct __darwin_ucontext ucontext_t; typedef __darwin_sigset_t sigset_t; typedef __darwin_uid_t uid_t; union sigval { int sival_int; void *sival_ptr; } ; struct sigevent { int sigev_notify; int sigev_signo; union sigval sigev_value; void ( *sigev_notify_function )(union sigval ); pthread_attr_t *sigev_notify_attributes; } ; struct __siginfo { int si_signo; int si_errno; int si_code; pid_t si_pid; uid_t si_uid; int si_status; void *si_addr; union sigval si_value; long si_band; unsigned long __pad[7]; } ; typedef struct __siginfo siginfo_t; union __sigaction_u { void ( *__sa_handler )(int ); void ( *__sa_sigaction )(int , struct __siginfo * , void *); } ; struct __sigaction { union __sigaction_u __sigaction_u; void ( *sa_tramp )(void *, int , int , siginfo_t * , void *); sigset_t sa_mask; int sa_flags; } ; struct sigaction { union __sigaction_u __sigaction_u; sigset_t sa_mask; int sa_flags; } ; typedef void ( 
*sig_t )(int ); struct sigvec { void ( *sv_handler )(int ); int sv_mask; int sv_flags; } ; struct sigstack { char *ss_sp; int ss_onstack; } ; typedef signed char int8_t; typedef short int16_t; typedef int int32_t; typedef long long int64_t; typedef unsigned char uint8_t; typedef unsigned short uint16_t; typedef unsigned int uint32_t; typedef unsigned long long uint64_t; typedef int8_t int_least8_t; typedef int16_t int_least16_t; typedef int32_t int_least32_t; typedef int64_t int_least64_t; typedef uint8_t uint_least8_t; typedef uint16_t uint_least16_t; typedef uint32_t uint_least32_t; typedef uint64_t uint_least64_t; typedef int8_t int_fast8_t; typedef int16_t int_fast16_t; typedef int32_t int_fast32_t; typedef int64_t int_fast64_t; typedef uint8_t uint_fast8_t; typedef uint16_t uint_fast16_t; typedef uint32_t uint_fast32_t; typedef uint64_t uint_fast64_t; typedef __darwin_intptr_t intptr_t; typedef unsigned long uintptr_t; typedef long int intmax_t; typedef long unsigned int uintmax_t; struct timeval { __darwin_time_t tv_sec; __darwin_suseconds_t tv_usec; } ; typedef __uint64_t rlim_t; struct rusage { struct timeval ru_utime; struct timeval ru_stime; long ru_maxrss; long ru_ixrss; long ru_idrss; long ru_isrss; long ru_minflt; long ru_majflt; long ru_nswap; long ru_inblock; long ru_oublock; long ru_msgsnd; long ru_msgrcv; long ru_nsignals; long ru_nvcsw; long ru_nivcsw; } ; typedef void *rusage_info_t; struct rusage_info_v0 { uint8_t ri_uuid[16]; uint64_t ri_user_time; uint64_t ri_system_time; uint64_t ri_pkg_idle_wkups; uint64_t ri_interrupt_wkups; uint64_t ri_pageins; uint64_t ri_wired_size; uint64_t ri_resident_size; uint64_t ri_phys_footprint; uint64_t ri_proc_start_abstime; uint64_t ri_proc_exit_abstime; } ; struct rusage_info_v1 { uint8_t ri_uuid[16]; uint64_t ri_user_time; uint64_t ri_system_time; uint64_t ri_pkg_idle_wkups; uint64_t ri_interrupt_wkups; uint64_t ri_pageins; uint64_t ri_wired_size; uint64_t ri_resident_size; uint64_t ri_phys_footprint; 
uint64_t ri_proc_start_abstime; uint64_t ri_proc_exit_abstime; uint64_t ri_child_user_time; uint64_t ri_child_system_time; uint64_t ri_child_pkg_idle_wkups; uint64_t ri_child_interrupt_wkups; uint64_t ri_child_pageins; uint64_t ri_child_elapsed_abstime; } ; struct rusage_info_v2 { uint8_t ri_uuid[16]; uint64_t ri_user_time; uint64_t ri_system_time; uint64_t ri_pkg_idle_wkups; uint64_t ri_interrupt_wkups; uint64_t ri_pageins; uint64_t ri_wired_size; uint64_t ri_resident_size; uint64_t ri_phys_footprint; uint64_t ri_proc_start_abstime; uint64_t ri_proc_exit_abstime; uint64_t ri_child_user_time; uint64_t ri_child_system_time; uint64_t ri_child_pkg_idle_wkups; uint64_t ri_child_interrupt_wkups; uint64_t ri_child_pageins; uint64_t ri_child_elapsed_abstime; uint64_t ri_diskio_bytesread; uint64_t ri_diskio_byteswritten; } ; struct rusage_info_v3 { uint8_t ri_uuid[16]; uint64_t ri_user_time; uint64_t ri_system_time; uint64_t ri_pkg_idle_wkups; uint64_t ri_interrupt_wkups; uint64_t ri_pageins; uint64_t ri_wired_size; uint64_t ri_resident_size; uint64_t ri_phys_footprint; uint64_t ri_proc_start_abstime; uint64_t ri_proc_exit_abstime; uint64_t ri_child_user_time; uint64_t ri_child_system_time; uint64_t ri_child_pkg_idle_wkups; uint64_t ri_child_interrupt_wkups; uint64_t ri_child_pageins; uint64_t ri_child_elapsed_abstime; uint64_t ri_diskio_bytesread; uint64_t ri_diskio_byteswritten; uint64_t ri_cpu_time_qos_default; uint64_t ri_cpu_time_qos_maintenance; uint64_t ri_cpu_time_qos_background; uint64_t ri_cpu_time_qos_utility; uint64_t ri_cpu_time_qos_legacy; uint64_t ri_cpu_time_qos_user_initiated; uint64_t ri_cpu_time_qos_user_interactive; uint64_t ri_billed_system_time; uint64_t ri_serviced_system_time; } ; typedef struct rusage_info_v3 rusage_info_current; struct rlimit { rlim_t rlim_cur; rlim_t rlim_max; } ; struct proc_rlimit_control_wakeupmon { uint32_t wm_flags; int32_t wm_rate; } ; union wait { int w_status; struct stUn_imopVarPre2 { unsigned int w_Termsig: 7, 
w_Coredump: 1 , w_Retcode: 8 , w_Filler: 16; } w_T; struct stUn_imopVarPre3 { unsigned int w_Stopval: 8, w_Stopsig: 8 , w_Filler: 16; } w_S; } ; typedef __darwin_ct_rune_t ct_rune_t; typedef __darwin_rune_t rune_t; typedef __darwin_wchar_t wchar_t; struct stUn_imopVarPre4 { int quot; int rem; } ; typedef struct stUn_imopVarPre4 div_t; struct stUn_imopVarPre5 { long quot; long rem; } ; typedef struct stUn_imopVarPre5 ldiv_t; struct stUn_imopVarPre6 { long long quot; long long rem; } ; typedef struct stUn_imopVarPre6 lldiv_t; void exit(int ); typedef unsigned char u_int8_t; typedef unsigned short u_int16_t; typedef unsigned int u_int32_t; typedef unsigned long long u_int64_t; typedef int64_t register_t; typedef u_int64_t user_addr_t; typedef u_int64_t user_size_t; typedef int64_t user_ssize_t; typedef int64_t user_long_t; typedef u_int64_t user_ulong_t; typedef int64_t user_time_t; typedef int64_t user_off_t; typedef u_int64_t syscall_arg_t; typedef __darwin_dev_t dev_t; typedef __darwin_mode_t mode_t; typedef float float_t; typedef double double_t; extern double fabs(double ); extern double pow(double , double ); extern double sqrt(double ); struct __float2 { float __sinval; float __cosval; } ; struct __double2 { double __sinval; double __cosval; } ; struct exception { int type; char *name; double arg1; double arg2; double retval; } ; typedef int boolean; struct stUn_imopVarPre11 { double real; double imag; } ; typedef struct stUn_imopVarPre11 dcomplex; extern void timer_clear(int ); extern void timer_start(int ); extern void timer_stop(int ); extern double timer_read(int ); extern void c_print_results(char *name, char class , int n1 , int n2 , int n3 , int niter , int nthreads , double t , double mops , char *optype , int passed_verification , char *npbversion , char *compiletime , char *cc , char *clink , char *c_lib , char *c_inc , char *cflags , char *clinkflags , char *rand); static int grid_points[3]; static double tx1; static double tx2; static double tx3; 
static double ty1; static double ty2; static double ty3; static double tz1; static double tz2; static double tz3; static double dx1; static double dx2; static double dx3; static double dx4; static double dx5; static double dy1; static double dy2; static double dy3; static double dy4; static double dy5; static double dz1; static double dz2; static double dz3; static double dz4; static double dz5; static double dssp; static double dt; static double ce[13][5]; static double dxmax; static double dymax; static double dzmax; static double xxcon1; static double xxcon2; static double xxcon3; static double xxcon4; static double xxcon5; static double dx1tx1; static double dx2tx1; static double dx3tx1; static double dx4tx1; static double dx5tx1; static double yycon1; static double yycon2; static double yycon3; static double yycon4; static double yycon5; static double dy1ty1; static double dy2ty1; static double dy3ty1; static double dy4ty1; static double dy5ty1; static double zzcon1; static double zzcon2; static double zzcon3; static double zzcon4; static double zzcon5; static double dz1tz1; static double dz2tz1; static double dz3tz1; static double dz4tz1; static double dz5tz1; static double dnxm1; static double dnym1; static double dnzm1; static double c1c2; static double c1c5; static double c3c4; static double c1345; static double conz1; static double c1; static double c2; static double c3; static double c4; static double c5; static double c4dssp; static double c5dssp; static double dtdssp; static double dttx1; static double bt; static double dttx2; static double dtty1; static double dtty2; static double dttz1; static double dttz2; static double c2dttx1; static double c2dtty1; static double c2dttz1; static double comz1; static double comz4; static double comz5; static double comz6; static double c3c4tx3; static double c3c4ty3; static double c3c4tz3; static double c2iv; static double con43; static double con16; static double u[5][12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 
1]; static double us[12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 1]; static double vs[12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 1]; static double ws[12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 1]; static double qs[12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 1]; static double ainv[12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 1]; static double rho_i[12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 1]; static double speed[12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 1]; static double square[12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 1]; static double rhs[5][12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 1]; static double forcing[5][12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 1]; static double lhs[15][12 / 2 * 2 + 1][12 / 2 * 2 + 1][12 / 2 * 2 + 1]; static double cv[12]; static double rhon[12]; static double rhos[12]; static double rhoq[12]; static double cuf[12]; static double q[12]; static double ue[5][12]; static double buf[5][12]; static void add(void ); static void adi(void ); static void error_norm(double rms[5]); static void rhs_norm(double rms[5]); static void exact_rhs(void ); static void exact_solution(double xi, double eta , double zeta , double dtemp[5]); static void initialize(void ); static void lhsinit(void ); static void lhsx(void ); static void lhsy(void ); static void lhsz(void ); static void ninvr(void ); static void pinvr(void ); static void compute_rhs(void ); static void set_constants(void ); static void txinvr(void ); static void tzetar(void ); static void verify(int no_time_steps, char *class , boolean *verified); static void x_solve(void ); static void y_solve(void ); static void z_solve(void ); int main(int argc, char **argv) { int niter; int step; double mflops; double tmax; int nthreads = 1; boolean verified; char class; FILE *fp; printf("\n\n NAS Parallel Benchmarks 3.0 structured OpenMP C version" " - SP Benchmark\n\n"); fp = fopen("inputsp.data", "r"); if (fp != ((void *) 0)) { printf(" Reading from input file inputsp.data\n"); int 
*_imopVarPre145; _imopVarPre145 = &niter; fscanf(fp, "%d", _imopVarPre145); int _imopVarPre147; _imopVarPre147 = fgetc(fp); while (_imopVarPre147 != '\n') { ; _imopVarPre147 = fgetc(fp); } double *_imopVarPre149; _imopVarPre149 = &dt; fscanf(fp, "%lf", _imopVarPre149); int _imopVarPre151; _imopVarPre151 = fgetc(fp); while (_imopVarPre151 != '\n') { ; _imopVarPre151 = fgetc(fp); } int *_imopVarPre155; int *_imopVarPre156; int *_imopVarPre157; _imopVarPre155 = &grid_points[2]; _imopVarPre156 = &grid_points[1]; _imopVarPre157 = &grid_points[0]; fscanf(fp, "%d%d%d", _imopVarPre157, _imopVarPre156, _imopVarPre155); fclose(fp); } else { printf(" No input file inputsp.data. Using compiled defaults"); niter = 100; dt = 0.015; grid_points[0] = 12; grid_points[1] = 12; grid_points[2] = 12; } int _imopVarPre161; int _imopVarPre162; int _imopVarPre163; _imopVarPre161 = grid_points[2]; _imopVarPre162 = grid_points[1]; _imopVarPre163 = grid_points[0]; printf(" Size: %3dx%3dx%3d\n", _imopVarPre163, _imopVarPre162, _imopVarPre161); printf(" Iterations: %3d dt: %10.6f\n", niter, dt); int _imopVarPre164; int _imopVarPre165; _imopVarPre164 = (grid_points[0] > 12); if (!_imopVarPre164) { _imopVarPre165 = (grid_points[1] > 12); if (!_imopVarPre165) { _imopVarPre165 = (grid_points[2] > 12); } _imopVarPre164 = _imopVarPre165; } if (_imopVarPre164) { int _imopVarPre169; int _imopVarPre170; int _imopVarPre171; _imopVarPre169 = grid_points[2]; _imopVarPre170 = grid_points[1]; _imopVarPre171 = grid_points[0]; printf("%d, %d, %d\n", _imopVarPre171, _imopVarPre170, _imopVarPre169); printf(" Problem size too big for compiled array sizes\n"); exit(1); } set_constants(); initialize(); lhsinit(); exact_rhs(); adi(); initialize(); timer_clear(1); timer_start(1); for (step = 1; step <= niter; step++) { int _imopVarPre172; _imopVarPre172 = step % 20 == 0; if (!_imopVarPre172) { _imopVarPre172 = step == 1; } if (_imopVarPre172) { printf(" Time step %4d\n", step); } adi(); } #pragma omp parallel { } 
timer_stop(1); tmax = timer_read(1); int *_imopVarPre175; char *_imopVarPre176; _imopVarPre175 = &verified; _imopVarPre176 = &class; verify(niter, _imopVarPre176, _imopVarPre175); if (tmax != 0) { double _imopVarPre183; double _imopVarPre184; _imopVarPre183 = (double) 12; _imopVarPre184 = pow(_imopVarPre183, 3.0); mflops = (881.174 * _imopVarPre184 - 4683.91 * (((double) 12) * ((double) 12)) + 11484.5 * (double) 12 - 19272.4) * (double) niter / (tmax * 1000000.0); } else { mflops = 0.0; } int _imopVarPre188; int _imopVarPre189; int _imopVarPre190; _imopVarPre188 = grid_points[2]; _imopVarPre189 = grid_points[1]; _imopVarPre190 = grid_points[0]; c_print_results("SP", class, _imopVarPre190, _imopVarPre189, _imopVarPre188, niter, nthreads, tmax, mflops, " floating point", verified, "3.0 structured", "21 Jul 2017", "gcc", "gcc", "(none)", "-I../common", "-O3 -fopenmp", "-O3 -fopenmp", "(none)"); } static void add(void ) { int i; int j; int k; int m; #pragma omp for nowait for (m = 0; m < 5; m++) { for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { u[m][i][j][k] = u[m][i][j][k] + rhs[m][i][j][k]; } } } } } static void adi(void ) { compute_rhs(); txinvr(); x_solve(); y_solve(); z_solve(); add(); } static void error_norm(double rms[5]) { int i; int j; int k; int m; int d; double xi; double eta; double zeta; double u_exact[5]; double add; for (m = 0; m < 5; m++) { rms[m] = 0.0; } for (i = 0; i <= grid_points[0] - 1; i++) { xi = (double) i * dnxm1; for (j = 0; j <= grid_points[1] - 1; j++) { eta = (double) j * dnym1; for (k = 0; k <= grid_points[2] - 1; k++) { zeta = (double) k * dnzm1; exact_solution(xi, eta, zeta, u_exact); for (m = 0; m < 5; m++) { add = u[m][i][j][k] - u_exact[m]; rms[m] = rms[m] + add * add; } } } } for (m = 0; m < 5; m++) { for (d = 0; d < 3; d++) { rms[m] = rms[m] / (double) (grid_points[d] - 2); } double _imopVarPre192; double _imopVarPre193; _imopVarPre192 = rms[m]; 
_imopVarPre193 = sqrt(_imopVarPre192); rms[m] = _imopVarPre193; } } static void rhs_norm(double rms[5]) { int i; int j; int k; int d; int m; double add; for (m = 0; m < 5; m++) { rms[m] = 0.0; } for (i = 0; i <= grid_points[0] - 2; i++) { for (j = 0; j <= grid_points[1] - 2; j++) { for (k = 0; k <= grid_points[2] - 2; k++) { for (m = 0; m < 5; m++) { add = rhs[m][i][j][k]; rms[m] = rms[m] + add * add; } } } } for (m = 0; m < 5; m++) { for (d = 0; d < 3; d++) { rms[m] = rms[m] / (double) (grid_points[d] - 2); } double _imopVarPre195; double _imopVarPre196; _imopVarPre195 = rms[m]; _imopVarPre196 = sqrt(_imopVarPre195); rms[m] = _imopVarPre196; } } static void exact_rhs(void ) { double dtemp[5]; double xi; double eta; double zeta; double dtpp; int m; int i; int j; int k; int ip1; int im1; int jp1; int jm1; int km1; int kp1; for (m = 0; m < 5; m++) { for (i = 0; i <= grid_points[0] - 1; i++) { for (j = 0; j <= grid_points[1] - 1; j++) { for (k = 0; k <= grid_points[2] - 1; k++) { forcing[m][i][j][k] = 0.0; } } } } for (k = 1; k <= grid_points[2] - 2; k++) { zeta = (double) k * dnzm1; for (j = 1; j <= grid_points[1] - 2; j++) { eta = (double) j * dnym1; for (i = 0; i <= grid_points[0] - 1; i++) { xi = (double) i * dnxm1; exact_solution(xi, eta, zeta, dtemp); for (m = 0; m < 5; m++) { ue[m][i] = dtemp[m]; } dtpp = 1.0 / dtemp[0]; for (m = 1; m < 5; m++) { buf[m][i] = dtpp * dtemp[m]; } cuf[i] = buf[1][i] * buf[1][i]; buf[0][i] = cuf[i] + buf[2][i] * buf[2][i] + buf[3][i] * buf[3][i]; q[i] = 0.5 * (buf[1][i] * ue[1][i] + buf[2][i] * ue[2][i] + buf[3][i] * ue[3][i]); } for (i = 1; i <= grid_points[0] - 2; i++) { im1 = i - 1; ip1 = i + 1; forcing[0][i][j][k] = forcing[0][i][j][k] - tx2 * (ue[1][ip1] - ue[1][im1]) + dx1tx1 * (ue[0][ip1] - 2.0 * ue[0][i] + ue[0][im1]); forcing[1][i][j][k] = forcing[1][i][j][k] - tx2 * ((ue[1][ip1] * buf[1][ip1] + c2 * (ue[4][ip1] - q[ip1])) - (ue[1][im1] * buf[1][im1] + c2 * (ue[4][im1] - q[im1]))) + xxcon1 * (buf[1][ip1] - 2.0 * buf[1][i] + 
buf[1][im1]) + dx2tx1 * (ue[1][ip1] - 2.0 * ue[1][i] + ue[1][im1]); forcing[2][i][j][k] = forcing[2][i][j][k] - tx2 * (ue[2][ip1] * buf[1][ip1] - ue[2][im1] * buf[1][im1]) + xxcon2 * (buf[2][ip1] - 2.0 * buf[2][i] + buf[2][im1]) + dx3tx1 * (ue[2][ip1] - 2.0 * ue[2][i] + ue[2][im1]); forcing[3][i][j][k] = forcing[3][i][j][k] - tx2 * (ue[3][ip1] * buf[1][ip1] - ue[3][im1] * buf[1][im1]) + xxcon2 * (buf[3][ip1] - 2.0 * buf[3][i] + buf[3][im1]) + dx4tx1 * (ue[3][ip1] - 2.0 * ue[3][i] + ue[3][im1]); forcing[4][i][j][k] = forcing[4][i][j][k] - tx2 * (buf[1][ip1] * (c1 * ue[4][ip1] - c2 * q[ip1]) - buf[1][im1] * (c1 * ue[4][im1] - c2 * q[im1])) + 0.5 * xxcon3 * (buf[0][ip1] - 2.0 * buf[0][i] + buf[0][im1]) + xxcon4 * (cuf[ip1] - 2.0 * cuf[i] + cuf[im1]) + xxcon5 * (buf[4][ip1] - 2.0 * buf[4][i] + buf[4][im1]) + dx5tx1 * (ue[4][ip1] - 2.0 * ue[4][i] + ue[4][im1]); } for (m = 0; m < 5; m++) { i = 1; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (5.0 * ue[m][i] - 4.0 * ue[m][i + 1] + ue[m][i + 2]); i = 2; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (-4.0 * ue[m][i - 1] + 6.0 * ue[m][i] - 4.0 * ue[m][i + 1] + ue[m][i + 2]); } for (m = 0; m < 5; m++) { for (i = 3; i <= grid_points[0] - 4; i++) { forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][i - 2] - 4.0 * ue[m][i - 1] + 6.0 * ue[m][i] - 4.0 * ue[m][i + 1] + ue[m][i + 2]); } } for (m = 0; m < 5; m++) { i = grid_points[0] - 3; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][i - 2] - 4.0 * ue[m][i - 1] + 6.0 * ue[m][i] - 4.0 * ue[m][i + 1]); i = grid_points[0] - 2; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][i - 2] - 4.0 * ue[m][i - 1] + 5.0 * ue[m][i]); } } } for (k = 1; k <= grid_points[2] - 2; k++) { zeta = (double) k * dnzm1; for (i = 1; i <= grid_points[0] - 2; i++) { xi = (double) i * dnxm1; for (j = 0; j <= grid_points[1] - 1; j++) { eta = (double) j * dnym1; exact_solution(xi, eta, zeta, dtemp); for (m = 0; m < 5; m++) { ue[m][j] = dtemp[m]; } dtpp = 1.0 / dtemp[0]; for (m = 
1; m < 5; m++) { buf[m][j] = dtpp * dtemp[m]; } cuf[j] = buf[2][j] * buf[2][j]; buf[0][j] = cuf[j] + buf[1][j] * buf[1][j] + buf[3][j] * buf[3][j]; q[j] = 0.5 * (buf[1][j] * ue[1][j] + buf[2][j] * ue[2][j] + buf[3][j] * ue[3][j]); } for (j = 1; j <= grid_points[1] - 2; j++) { jm1 = j - 1; jp1 = j + 1; forcing[0][i][j][k] = forcing[0][i][j][k] - ty2 * (ue[2][jp1] - ue[2][jm1]) + dy1ty1 * (ue[0][jp1] - 2.0 * ue[0][j] + ue[0][jm1]); forcing[1][i][j][k] = forcing[1][i][j][k] - ty2 * (ue[1][jp1] * buf[2][jp1] - ue[1][jm1] * buf[2][jm1]) + yycon2 * (buf[1][jp1] - 2.0 * buf[1][j] + buf[1][jm1]) + dy2ty1 * (ue[1][jp1] - 2.0 * ue[1][j] + ue[1][jm1]); forcing[2][i][j][k] = forcing[2][i][j][k] - ty2 * ((ue[2][jp1] * buf[2][jp1] + c2 * (ue[4][jp1] - q[jp1])) - (ue[2][jm1] * buf[2][jm1] + c2 * (ue[4][jm1] - q[jm1]))) + yycon1 * (buf[2][jp1] - 2.0 * buf[2][j] + buf[2][jm1]) + dy3ty1 * (ue[2][jp1] - 2.0 * ue[2][j] + ue[2][jm1]); forcing[3][i][j][k] = forcing[3][i][j][k] - ty2 * (ue[3][jp1] * buf[2][jp1] - ue[3][jm1] * buf[2][jm1]) + yycon2 * (buf[3][jp1] - 2.0 * buf[3][j] + buf[3][jm1]) + dy4ty1 * (ue[3][jp1] - 2.0 * ue[3][j] + ue[3][jm1]); forcing[4][i][j][k] = forcing[4][i][j][k] - ty2 * (buf[2][jp1] * (c1 * ue[4][jp1] - c2 * q[jp1]) - buf[2][jm1] * (c1 * ue[4][jm1] - c2 * q[jm1])) + 0.5 * yycon3 * (buf[0][jp1] - 2.0 * buf[0][j] + buf[0][jm1]) + yycon4 * (cuf[jp1] - 2.0 * cuf[j] + cuf[jm1]) + yycon5 * (buf[4][jp1] - 2.0 * buf[4][j] + buf[4][jm1]) + dy5ty1 * (ue[4][jp1] - 2.0 * ue[4][j] + ue[4][jm1]); } for (m = 0; m < 5; m++) { j = 1; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (5.0 * ue[m][j] - 4.0 * ue[m][j + 1] + ue[m][j + 2]); j = 2; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (-4.0 * ue[m][j - 1] + 6.0 * ue[m][j] - 4.0 * ue[m][j + 1] + ue[m][j + 2]); } for (m = 0; m < 5; m++) { for (j = 3; j <= grid_points[1] - 4; j++) { forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][j - 2] - 4.0 * ue[m][j - 1] + 6.0 * ue[m][j] - 4.0 * ue[m][j + 1] + ue[m][j + 2]); 
} } for (m = 0; m < 5; m++) { j = grid_points[1] - 3; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][j - 2] - 4.0 * ue[m][j - 1] + 6.0 * ue[m][j] - 4.0 * ue[m][j + 1]); j = grid_points[1] - 2; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][j - 2] - 4.0 * ue[m][j - 1] + 5.0 * ue[m][j]); } } } for (j = 1; j <= grid_points[1] - 2; j++) { eta = (double) j * dnym1; for (i = 1; i <= grid_points[0] - 2; i++) { xi = (double) i * dnxm1; for (k = 0; k <= grid_points[2] - 1; k++) { zeta = (double) k * dnzm1; exact_solution(xi, eta, zeta, dtemp); for (m = 0; m < 5; m++) { ue[m][k] = dtemp[m]; } dtpp = 1.0 / dtemp[0]; for (m = 1; m < 5; m++) { buf[m][k] = dtpp * dtemp[m]; } cuf[k] = buf[3][k] * buf[3][k]; buf[0][k] = cuf[k] + buf[1][k] * buf[1][k] + buf[2][k] * buf[2][k]; q[k] = 0.5 * (buf[1][k] * ue[1][k] + buf[2][k] * ue[2][k] + buf[3][k] * ue[3][k]); } for (k = 1; k <= grid_points[2] - 2; k++) { km1 = k - 1; kp1 = k + 1; forcing[0][i][j][k] = forcing[0][i][j][k] - tz2 * (ue[3][kp1] - ue[3][km1]) + dz1tz1 * (ue[0][kp1] - 2.0 * ue[0][k] + ue[0][km1]); forcing[1][i][j][k] = forcing[1][i][j][k] - tz2 * (ue[1][kp1] * buf[3][kp1] - ue[1][km1] * buf[3][km1]) + zzcon2 * (buf[1][kp1] - 2.0 * buf[1][k] + buf[1][km1]) + dz2tz1 * (ue[1][kp1] - 2.0 * ue[1][k] + ue[1][km1]); forcing[2][i][j][k] = forcing[2][i][j][k] - tz2 * (ue[2][kp1] * buf[3][kp1] - ue[2][km1] * buf[3][km1]) + zzcon2 * (buf[2][kp1] - 2.0 * buf[2][k] + buf[2][km1]) + dz3tz1 * (ue[2][kp1] - 2.0 * ue[2][k] + ue[2][km1]); forcing[3][i][j][k] = forcing[3][i][j][k] - tz2 * ((ue[3][kp1] * buf[3][kp1] + c2 * (ue[4][kp1] - q[kp1])) - (ue[3][km1] * buf[3][km1] + c2 * (ue[4][km1] - q[km1]))) + zzcon1 * (buf[3][kp1] - 2.0 * buf[3][k] + buf[3][km1]) + dz4tz1 * (ue[3][kp1] - 2.0 * ue[3][k] + ue[3][km1]); forcing[4][i][j][k] = forcing[4][i][j][k] - tz2 * (buf[3][kp1] * (c1 * ue[4][kp1] - c2 * q[kp1]) - buf[3][km1] * (c1 * ue[4][km1] - c2 * q[km1])) + 0.5 * zzcon3 * (buf[0][kp1] - 2.0 * buf[0][k] + buf[0][km1]) + 
zzcon4 * (cuf[kp1] - 2.0 * cuf[k] + cuf[km1]) + zzcon5 * (buf[4][kp1] - 2.0 * buf[4][k] + buf[4][km1]) + dz5tz1 * (ue[4][kp1] - 2.0 * ue[4][k] + ue[4][km1]); } for (m = 0; m < 5; m++) { k = 1; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (5.0 * ue[m][k] - 4.0 * ue[m][k + 1] + ue[m][k + 2]); k = 2; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (-4.0 * ue[m][k - 1] + 6.0 * ue[m][k] - 4.0 * ue[m][k + 1] + ue[m][k + 2]); } for (m = 0; m < 5; m++) { for (k = 3; k <= grid_points[2] - 4; k++) { forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][k - 2] - 4.0 * ue[m][k - 1] + 6.0 * ue[m][k] - 4.0 * ue[m][k + 1] + ue[m][k + 2]); } } for (m = 0; m < 5; m++) { k = grid_points[2] - 3; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][k - 2] - 4.0 * ue[m][k - 1] + 6.0 * ue[m][k] - 4.0 * ue[m][k + 1]); k = grid_points[2] - 2; forcing[m][i][j][k] = forcing[m][i][j][k] - dssp * (ue[m][k - 2] - 4.0 * ue[m][k - 1] + 5.0 * ue[m][k]); } } } for (m = 0; m < 5; m++) { for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { forcing[m][i][j][k] = -1.0 * forcing[m][i][j][k]; } } } } } static void exact_solution(double xi, double eta , double zeta , double dtemp[5]) { int m; for (m = 0; m < 5; m++) { dtemp[m] = ce[0][m] + xi * (ce[1][m] + xi * (ce[4][m] + xi * (ce[7][m] + xi * ce[10][m]))) + eta * (ce[2][m] + eta * (ce[5][m] + eta * (ce[8][m] + eta * ce[11][m]))) + zeta * (ce[3][m] + zeta * (ce[6][m] + zeta * (ce[9][m] + zeta * ce[12][m]))); } } static void initialize(void ) { int i; int j; int k; int m; int ix; int iy; int iz; double xi; double eta; double zeta; double Pface[2][3][5]; double Pxi; double Peta; double Pzeta; double temp[5]; for (i = 0; i <= 12 - 1; i++) { for (j = 0; j <= 12 - 1; j++) { for (k = 0; k <= 12 - 1; k++) { u[0][i][j][k] = 1.0; u[1][i][j][k] = 0.0; u[2][i][j][k] = 0.0; u[3][i][j][k] = 0.0; u[4][i][j][k] = 1.0; } } } for (i = 0; i <= grid_points[0] - 1; i++) { xi = 
(double) i * dnxm1; for (j = 0; j <= grid_points[1] - 1; j++) { eta = (double) j * dnym1; for (k = 0; k <= grid_points[2] - 1; k++) { zeta = (double) k * dnzm1; for (ix = 0; ix < 2; ix++) { double *_imopVarPre199; double _imopVarPre200; _imopVarPre199 = &Pface[ix][0][0]; _imopVarPre200 = (double) ix; exact_solution(_imopVarPre200, eta, zeta, _imopVarPre199); } for (iy = 0; iy < 2; iy++) { double *_imopVarPre203; double _imopVarPre204; _imopVarPre203 = &Pface[iy][1][0]; _imopVarPre204 = (double) iy; exact_solution(xi, _imopVarPre204, zeta, _imopVarPre203); } for (iz = 0; iz < 2; iz++) { double *_imopVarPre207; double _imopVarPre208; _imopVarPre207 = &Pface[iz][2][0]; _imopVarPre208 = (double) iz; exact_solution(xi, eta, _imopVarPre208, _imopVarPre207); } for (m = 0; m < 5; m++) { Pxi = xi * Pface[1][0][m] + (1.0 - xi) * Pface[0][0][m]; Peta = eta * Pface[1][1][m] + (1.0 - eta) * Pface[0][1][m]; Pzeta = zeta * Pface[1][2][m] + (1.0 - zeta) * Pface[0][2][m]; u[m][i][j][k] = Pxi + Peta + Pzeta - Pxi * Peta - Pxi * Pzeta - Peta * Pzeta + Pxi * Peta * Pzeta; } } } } xi = 0.0; i = 0; for (j = 0; j < grid_points[1]; j++) { eta = (double) j * dnym1; for (k = 0; k < grid_points[2]; k++) { zeta = (double) k * dnzm1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[m][i][j][k] = temp[m]; } } } xi = 1.0; i = grid_points[0] - 1; for (j = 0; j < grid_points[1]; j++) { eta = (double) j * dnym1; for (k = 0; k < grid_points[2]; k++) { zeta = (double) k * dnzm1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[m][i][j][k] = temp[m]; } } } eta = 0.0; j = 0; for (i = 0; i < grid_points[0]; i++) { xi = (double) i * dnxm1; for (k = 0; k < grid_points[2]; k++) { zeta = (double) k * dnzm1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[m][i][j][k] = temp[m]; } } } eta = 1.0; j = grid_points[1] - 1; for (i = 0; i < grid_points[0]; i++) { xi = (double) i * dnxm1; for (k = 0; k < grid_points[2]; k++) { zeta = (double) k * dnzm1; 
exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[m][i][j][k] = temp[m]; } } } zeta = 0.0; k = 0; for (i = 0; i < grid_points[0]; i++) { xi = (double) i * dnxm1; for (j = 0; j < grid_points[1]; j++) { eta = (double) j * dnym1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[m][i][j][k] = temp[m]; } } } zeta = 1.0; k = grid_points[2] - 1; for (i = 0; i < grid_points[0]; i++) { xi = (double) i * dnxm1; for (j = 0; j < grid_points[1]; j++) { eta = (double) j * dnym1; exact_solution(xi, eta, zeta, temp); for (m = 0; m < 5; m++) { u[m][i][j][k] = temp[m]; } } } } static void lhsinit(void ) { int i; int j; int k; int n; for (n = 0; n < 15; n++) { #pragma omp for nowait for (i = 0; i < grid_points[0]; i++) { for (j = 0; j < grid_points[1]; j++) { for (k = 0; k < grid_points[2]; k++) { lhs[n][i][j][k] = 0.0; } } } } for (n = 0; n < 3; n++) { #pragma omp for nowait for (i = 0; i < grid_points[0]; i++) { for (j = 0; j < grid_points[1]; j++) { for (k = 0; k < grid_points[2]; k++) { lhs[5 * n + 2][i][j][k] = 1.0; } } } } } static void lhsx(void ) { double ru1; int i; int j; int k; for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { #pragma omp for nowait for (i = 0; i <= grid_points[0] - 1; i++) { ru1 = c3c4 * rho_i[i][j][k]; cv[i] = us[i][j][k]; int _imopVarPre719; double _imopVarPre720; int _imopVarPre721; double _imopVarPre722; int _imopVarPre729; double _imopVarPre730; int _imopVarPre731; double _imopVarPre732; int _imopVarPre825; double _imopVarPre826; int _imopVarPre827; double _imopVarPre828; int _imopVarPre835; double _imopVarPre836; _imopVarPre719 = ((dxmax + ru1) > dx1); if (_imopVarPre719) { _imopVarPre720 = (dxmax + ru1); } else { _imopVarPre720 = dx1; } _imopVarPre721 = ((dx5 + c1c5 * ru1) > _imopVarPre720); if (_imopVarPre721) { _imopVarPre722 = (dx5 + c1c5 * ru1); } else { _imopVarPre729 = ((dxmax + ru1) > dx1); if (_imopVarPre729) { _imopVarPre730 = (dxmax + ru1); } else { _imopVarPre730 = dx1; 
} _imopVarPre722 = _imopVarPre730; } _imopVarPre731 = ((dx2 + con43 * ru1) > _imopVarPre722); if (_imopVarPre731) { _imopVarPre732 = (dx2 + con43 * ru1); } else { _imopVarPre825 = ((dxmax + ru1) > dx1); if (_imopVarPre825) { _imopVarPre826 = (dxmax + ru1); } else { _imopVarPre826 = dx1; } _imopVarPre827 = ((dx5 + c1c5 * ru1) > _imopVarPre826); if (_imopVarPre827) { _imopVarPre828 = (dx5 + c1c5 * ru1); } else { _imopVarPre835 = ((dxmax + ru1) > dx1); if (_imopVarPre835) { _imopVarPre836 = (dxmax + ru1); } else { _imopVarPre836 = dx1; } _imopVarPre828 = _imopVarPre836; } _imopVarPre732 = _imopVarPre828; } rhon[i] = _imopVarPre732; } // #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f, rhon.f, cv.f]) read([cv, i, dttx2, rhon, lhs.f, dttx1, rhon.f, grid_points.f, c2dttx1, grid_points, lhs, cv.f]) #pragma omp barrier #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { lhs[0][i][j][k] = 0.0; lhs[1][i][j][k] = -dttx2 * cv[i - 1] - dttx1 * rhon[i - 1]; lhs[2][i][j][k] = 1.0 + c2dttx1 * rhon[i]; lhs[3][i][j][k] = dttx2 * cv[i + 1] - dttx1 * rhon[i + 1]; lhs[4][i][j][k] = 0.0; } // #pragma omp dummyFlush BARRIER_START written([lhs.f]) read([_imopVarPre731, j, _imopVarPre721, comz1, rho_i, rhon, _imopVarPre719, lhs.f, comz5, _imopVarPre729, grid_points.f, dxmax, us, dx1, c1c5, dx5, cv, i, _imopVarPre825, comz4, _imopVarPre835, comz6, _imopVarPre827, con43, grid_points, lhs, us.f, c3c4, rho_i.f, dx2]) #pragma omp barrier } } i = 1; #pragma omp for nowait for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { lhs[2][i][j][k] = lhs[2][i][j][k] + comz5; lhs[3][i][j][k] = lhs[3][i][j][k] - comz4; lhs[4][i][j][k] = lhs[4][i][j][k] + comz1; lhs[1][i + 1][j][k] = lhs[1][i + 1][j][k] - comz4; lhs[2][i + 1][j][k] = lhs[2][i + 1][j][k] + comz6; lhs[3][i + 1][j][k] = lhs[3][i + 1][j][k] - comz4; lhs[4][i + 1][j][k] = lhs[4][i + 1][j][k] + comz1; } } #pragma omp for nowait for (i = 3; i <= grid_points[0] - 4; i++) { for (j = 1; j 
<= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { lhs[0][i][j][k] = lhs[0][i][j][k] + comz1; lhs[1][i][j][k] = lhs[1][i][j][k] - comz4; lhs[2][i][j][k] = lhs[2][i][j][k] + comz6; lhs[3][i][j][k] = lhs[3][i][j][k] - comz4; lhs[4][i][j][k] = lhs[4][i][j][k] + comz1; } } } i = grid_points[0] - 3; #pragma omp for nowait for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { lhs[0][i][j][k] = lhs[0][i][j][k] + comz1; lhs[1][i][j][k] = lhs[1][i][j][k] - comz4; lhs[2][i][j][k] = lhs[2][i][j][k] + comz6; lhs[3][i][j][k] = lhs[3][i][j][k] - comz4; lhs[0][i + 1][j][k] = lhs[0][i + 1][j][k] + comz1; lhs[1][i + 1][j][k] = lhs[1][i + 1][j][k] - comz4; lhs[2][i + 1][j][k] = lhs[2][i + 1][j][k] + comz5; } } // #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f, rhon.f, cv.f]) read([i, dttx2, lhs.f, speed, grid_points.f, grid_points, speed.f, lhs]) #pragma omp barrier #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { lhs[0 + 5][i][j][k] = lhs[0][i][j][k]; lhs[1 + 5][i][j][k] = lhs[1][i][j][k] - dttx2 * speed[i - 1][j][k]; lhs[2 + 5][i][j][k] = lhs[2][i][j][k]; lhs[3 + 5][i][j][k] = lhs[3][i][j][k] + dttx2 * speed[i + 1][j][k]; lhs[4 + 5][i][j][k] = lhs[4][i][j][k]; lhs[0 + 10][i][j][k] = lhs[0][i][j][k]; lhs[1 + 10][i][j][k] = lhs[1][i][j][k] + dttx2 * speed[i - 1][j][k]; lhs[2 + 10][i][j][k] = lhs[2][i][j][k]; lhs[3 + 10][i][j][k] = lhs[3][i][j][k] - dttx2 * speed[i + 1][j][k]; lhs[4 + 10][i][j][k] = lhs[4][i][j][k]; } } } // #pragma omp dummyFlush BARRIER_START written([lhs.f]) read([rhs.f, rhs, lhs.f, j, grid_points.f, grid_points, lhs]) #pragma omp barrier } static void lhsy(void ) { double ru1; int i; int j; int k; for (i = 1; i <= grid_points[0] - 2; i++) { for (k = 1; k <= grid_points[2] - 2; k++) { #pragma omp for nowait for (j = 0; j <= grid_points[1] - 1; j++) { ru1 = c3c4 * rho_i[i][j][k]; cv[j] = 
vs[i][j][k]; int _imopVarPre1347; double _imopVarPre1348; int _imopVarPre1349; double _imopVarPre1350; int _imopVarPre1357; double _imopVarPre1358; int _imopVarPre1359; double _imopVarPre1360; int _imopVarPre1453; double _imopVarPre1454; int _imopVarPre1455; double _imopVarPre1456; int _imopVarPre1463; double _imopVarPre1464; _imopVarPre1347 = ((dymax + ru1) > dy1); if (_imopVarPre1347) { _imopVarPre1348 = (dymax + ru1); } else { _imopVarPre1348 = dy1; } _imopVarPre1349 = ((dy5 + c1c5 * ru1) > _imopVarPre1348); if (_imopVarPre1349) { _imopVarPre1350 = (dy5 + c1c5 * ru1); } else { _imopVarPre1357 = ((dymax + ru1) > dy1); if (_imopVarPre1357) { _imopVarPre1358 = (dymax + ru1); } else { _imopVarPre1358 = dy1; } _imopVarPre1350 = _imopVarPre1358; } _imopVarPre1359 = ((dy3 + con43 * ru1) > _imopVarPre1350); if (_imopVarPre1359) { _imopVarPre1360 = (dy3 + con43 * ru1); } else { _imopVarPre1453 = ((dymax + ru1) > dy1); if (_imopVarPre1453) { _imopVarPre1454 = (dymax + ru1); } else { _imopVarPre1454 = dy1; } _imopVarPre1455 = ((dy5 + c1c5 * ru1) > _imopVarPre1454); if (_imopVarPre1455) { _imopVarPre1456 = (dy5 + c1c5 * ru1); } else { _imopVarPre1463 = ((dymax + ru1) > dy1); if (_imopVarPre1463) { _imopVarPre1464 = (dymax + ru1); } else { _imopVarPre1464 = dy1; } _imopVarPre1456 = _imopVarPre1464; } _imopVarPre1360 = _imopVarPre1456; } rhoq[j] = _imopVarPre1360; } // #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f, rhoq.f, cv.f]) read([cv, j, lhs.f, dtty1, dtty2, rhoq.f, grid_points.f, rhoq, grid_points, lhs, cv.f, c2dtty1]) #pragma omp barrier #pragma omp for nowait for (j = 1; j <= grid_points[1] - 2; j++) { lhs[0][i][j][k] = 0.0; lhs[1][i][j][k] = -dtty2 * cv[j - 1] - dtty1 * rhoq[j - 1]; lhs[2][i][j][k] = 1.0 + c2dtty1 * rhoq[j]; lhs[3][i][j][k] = dtty2 * cv[j + 1] - dtty1 * rhoq[j + 1]; lhs[4][i][j][k] = 0.0; } // #pragma omp dummyFlush BARRIER_START written([lhs.f]) read([comz1, rho_i, lhs.f, comz5, grid_points.f, vs, j, c1c5, dy1, dy3, _imopVarPre1349, 
dy5, _imopVarPre1359, _imopVarPre1347, _imopVarPre1357, cv, dymax, comz4, comz6, con43, grid_points, rhoq, lhs, vs.f, _imopVarPre1455, i, c3c4, _imopVarPre1453, rho_i.f, _imopVarPre1463]) #pragma omp barrier } } j = 1; #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (k = 1; k <= grid_points[2] - 2; k++) { lhs[2][i][j][k] = lhs[2][i][j][k] + comz5; lhs[3][i][j][k] = lhs[3][i][j][k] - comz4; lhs[4][i][j][k] = lhs[4][i][j][k] + comz1; lhs[1][i][j + 1][k] = lhs[1][i][j + 1][k] - comz4; lhs[2][i][j + 1][k] = lhs[2][i][j + 1][k] + comz6; lhs[3][i][j + 1][k] = lhs[3][i][j + 1][k] - comz4; lhs[4][i][j + 1][k] = lhs[4][i][j + 1][k] + comz1; } } #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 3; j <= grid_points[1] - 4; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { lhs[0][i][j][k] = lhs[0][i][j][k] + comz1; lhs[1][i][j][k] = lhs[1][i][j][k] - comz4; lhs[2][i][j][k] = lhs[2][i][j][k] + comz6; lhs[3][i][j][k] = lhs[3][i][j][k] - comz4; lhs[4][i][j][k] = lhs[4][i][j][k] + comz1; } } } j = grid_points[1] - 3; #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (k = 1; k <= grid_points[2] - 2; k++) { lhs[0][i][j][k] = lhs[0][i][j][k] + comz1; lhs[1][i][j][k] = lhs[1][i][j][k] - comz4; lhs[2][i][j][k] = lhs[2][i][j][k] + comz6; lhs[3][i][j][k] = lhs[3][i][j][k] - comz4; lhs[0][i][j + 1][k] = lhs[0][i][j + 1][k] + comz1; lhs[1][i][j + 1][k] = lhs[1][i][j + 1][k] - comz4; lhs[2][i][j + 1][k] = lhs[2][i][j + 1][k] + comz5; } } // #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f, rhoq.f, cv.f]) read([lhs.f, i, speed, dtty2, grid_points.f, grid_points, speed.f, lhs]) #pragma omp barrier #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { lhs[0 + 5][i][j][k] = lhs[0][i][j][k]; lhs[1 + 5][i][j][k] = lhs[1][i][j][k] - dtty2 * speed[i][j - 1][k]; lhs[2 + 5][i][j][k] = lhs[2][i][j][k]; lhs[3 + 5][i][j][k] 
= lhs[3][i][j][k] + dtty2 * speed[i][j + 1][k]; lhs[4 + 5][i][j][k] = lhs[4][i][j][k]; lhs[0 + 10][i][j][k] = lhs[0][i][j][k]; lhs[1 + 10][i][j][k] = lhs[1][i][j][k] + dtty2 * speed[i][j - 1][k]; lhs[2 + 10][i][j][k] = lhs[2][i][j][k]; lhs[3 + 10][i][j][k] = lhs[3][i][j][k] - dtty2 * speed[i][j + 1][k]; lhs[4 + 10][i][j][k] = lhs[4][i][j][k]; } } } // #pragma omp dummyFlush BARRIER_START written([lhs.f]) read([rhs.f, i, rhs, lhs.f, grid_points.f, grid_points, lhs]) #pragma omp barrier } static void lhsz(void ) { double ru1; int i; int j; int k; for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 1; j <= grid_points[1] - 2; j++) { #pragma omp for nowait for (k = 0; k <= grid_points[2] - 1; k++) { ru1 = c3c4 * rho_i[i][j][k]; cv[k] = ws[i][j][k]; int _imopVarPre1975; double _imopVarPre1976; int _imopVarPre1977; double _imopVarPre1978; int _imopVarPre1985; double _imopVarPre1986; int _imopVarPre1987; double _imopVarPre1988; int _imopVarPre2081; double _imopVarPre2082; int _imopVarPre2083; double _imopVarPre2084; int _imopVarPre2091; double _imopVarPre2092; _imopVarPre1975 = ((dzmax + ru1) > dz1); if (_imopVarPre1975) { _imopVarPre1976 = (dzmax + ru1); } else { _imopVarPre1976 = dz1; } _imopVarPre1977 = ((dz5 + c1c5 * ru1) > _imopVarPre1976); if (_imopVarPre1977) { _imopVarPre1978 = (dz5 + c1c5 * ru1); } else { _imopVarPre1985 = ((dzmax + ru1) > dz1); if (_imopVarPre1985) { _imopVarPre1986 = (dzmax + ru1); } else { _imopVarPre1986 = dz1; } _imopVarPre1978 = _imopVarPre1986; } _imopVarPre1987 = ((dz4 + con43 * ru1) > _imopVarPre1978); if (_imopVarPre1987) { _imopVarPre1988 = (dz4 + con43 * ru1); } else { _imopVarPre2081 = ((dzmax + ru1) > dz1); if (_imopVarPre2081) { _imopVarPre2082 = (dzmax + ru1); } else { _imopVarPre2082 = dz1; } _imopVarPre2083 = ((dz5 + c1c5 * ru1) > _imopVarPre2082); if (_imopVarPre2083) { _imopVarPre2084 = (dz5 + c1c5 * ru1); } else { _imopVarPre2091 = ((dzmax + ru1) > dz1); if (_imopVarPre2091) { _imopVarPre2092 = (dzmax + ru1); } else { 
_imopVarPre2092 = dz1; } _imopVarPre2084 = _imopVarPre2092; } _imopVarPre1988 = _imopVarPre2084; } rhos[k] = _imopVarPre1988; } // #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f, rhos.f, cv.f]) read([cv, k, dttz1, lhs.f, dttz2, grid_points.f, grid_points, lhs, rhos.f, c2dttz1, cv.f, rhos]) #pragma omp barrier #pragma omp for nowait for (k = 1; k <= grid_points[2] - 2; k++) { lhs[0][i][j][k] = 0.0; lhs[1][i][j][k] = -dttz2 * cv[k - 1] - dttz1 * rhos[k - 1]; lhs[2][i][j][k] = 1.0 + c2dttz1 * rhos[k]; lhs[3][i][j][k] = dttz2 * cv[k + 1] - dttz1 * rhos[k + 1]; lhs[4][i][j][k] = 0.0; } // #pragma omp dummyFlush BARRIER_START written([lhs.f]) read([k, rho_i, comz1, i, lhs.f, comz5, grid_points.f, ws, dz1, c1c5, dz5, cv, comz4, comz6, dzmax, con43, grid_points, lhs, rhos, _imopVarPre1977, _imopVarPre1987, _imopVarPre1975, _imopVarPre1985, ws.f, c3c4, _imopVarPre2081, rho_i.f, _imopVarPre2083, dz4, _imopVarPre2091]) #pragma omp barrier } } k = 1; #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 1; j <= grid_points[1] - 2; j++) { lhs[2][i][j][k] = lhs[2][i][j][k] + comz5; lhs[3][i][j][k] = lhs[3][i][j][k] - comz4; lhs[4][i][j][k] = lhs[4][i][j][k] + comz1; lhs[1][i][j][k + 1] = lhs[1][i][j][k + 1] - comz4; lhs[2][i][j][k + 1] = lhs[2][i][j][k + 1] + comz6; lhs[3][i][j][k + 1] = lhs[3][i][j][k + 1] - comz4; lhs[4][i][j][k + 1] = lhs[4][i][j][k + 1] + comz1; } } #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 3; k <= grid_points[2] - 4; k++) { lhs[0][i][j][k] = lhs[0][i][j][k] + comz1; lhs[1][i][j][k] = lhs[1][i][j][k] - comz4; lhs[2][i][j][k] = lhs[2][i][j][k] + comz6; lhs[3][i][j][k] = lhs[3][i][j][k] - comz4; lhs[4][i][j][k] = lhs[4][i][j][k] + comz1; } } } k = grid_points[2] - 3; #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 1; j <= grid_points[1] - 2; j++) { lhs[0][i][j][k] = lhs[0][i][j][k] + comz1; lhs[1][i][j][k] = 
lhs[1][i][j][k] - comz4; lhs[2][i][j][k] = lhs[2][i][j][k] + comz6; lhs[3][i][j][k] = lhs[3][i][j][k] - comz4; lhs[0][i][j][k + 1] = lhs[0][i][j][k + 1] + comz1; lhs[1][i][j][k + 1] = lhs[1][i][j][k + 1] - comz4; lhs[2][i][j][k + 1] = lhs[2][i][j][k + 1] + comz5; } } // #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f, rhos.f, cv.f]) read([i, lhs.f, dttz2, speed, grid_points.f, grid_points, speed.f, lhs]) #pragma omp barrier #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { lhs[0 + 5][i][j][k] = lhs[0][i][j][k]; lhs[1 + 5][i][j][k] = lhs[1][i][j][k] - dttz2 * speed[i][j][k - 1]; lhs[2 + 5][i][j][k] = lhs[2][i][j][k]; lhs[3 + 5][i][j][k] = lhs[3][i][j][k] + dttz2 * speed[i][j][k + 1]; lhs[4 + 5][i][j][k] = lhs[4][i][j][k]; lhs[0 + 10][i][j][k] = lhs[0][i][j][k]; lhs[1 + 10][i][j][k] = lhs[1][i][j][k] + dttz2 * speed[i][j][k - 1]; lhs[2 + 10][i][j][k] = lhs[2][i][j][k]; lhs[3 + 10][i][j][k] = lhs[3][i][j][k] - dttz2 * speed[i][j][k + 1]; lhs[4 + 10][i][j][k] = lhs[4][i][j][k]; } } } // #pragma omp dummyFlush BARRIER_START written([lhs.f]) read([rhs.f, rhs, i, lhs.f, grid_points.f, grid_points, lhs]) #pragma omp barrier } static void ninvr(void ) { int i; int j; int k; double r1; double r2; double r3; double r4; double r5; double t1; double t2; #pragma omp parallel default(shared) private(i, j, k, r1, r2, r3, r4, r5, t1, t2) { #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { r1 = rhs[0][i][j][k]; r2 = rhs[1][i][j][k]; r3 = rhs[2][i][j][k]; r4 = rhs[3][i][j][k]; r5 = rhs[4][i][j][k]; t1 = bt * r3; t2 = 0.5 * (r4 + r5); rhs[0][i][j][k] = -r2; rhs[1][i][j][k] = r1; rhs[2][i][j][k] = bt * (r4 - r5); rhs[3][i][j][k] = -t1 + t2; rhs[4][i][j][k] = t1 + t2; } } } } } static void pinvr(void ) { int i; int j; int k; double r1; double r2; double r3; double r4; 
    double r5;
    double t1;
    double t2;
    /* pinvr body: same structure as ninvr but with the P-matrix mixing. */
#pragma omp parallel default(shared) private(i, j, k, r1, r2, r3, r4, r5, t1, t2)
    {
#pragma omp for nowait
        for (i = 1; i <= grid_points[0] - 2; i++) {
            for (j = 1; j <= grid_points[1] - 2; j++) {
                for (k = 1; k <= grid_points[2] - 2; k++) {
                    r1 = rhs[0][i][j][k];
                    r2 = rhs[1][i][j][k];
                    r3 = rhs[2][i][j][k];
                    r4 = rhs[3][i][j][k];
                    r5 = rhs[4][i][j][k];
                    t1 = bt * r1;
                    t2 = 0.5 * (r4 + r5);
                    rhs[0][i][j][k] = bt * (r4 - r5);
                    rhs[1][i][j][k] = -r3;
                    rhs[2][i][j][k] = r2;
                    rhs[3][i][j][k] = -t1 + t2;
                    rhs[4][i][j][k] = t1 + t2;
                }
            }
        }
    }
}
/* compute_rhs: build the right-hand side of the SP system.
 * Phases (all inside one parallel region):
 *   1. auxiliary point-wise quantities (rho_i, us, vs, ws, square, qs,
 *      speed, ainv) over the WHOLE grid including boundaries;
 *   2. rhs := forcing;
 *   3. flux differences + viscous terms in x, then y, then z;
 *   4. fourth-order artificial dissipation in each direction, with
 *      one-sided stencils at the two planes nearest each boundary;
 *   5. scale by the time step dt.
 * Barriers separate the x-direction phases; the y/z dissipation phases rely
 * on the preceding loops' iteration spaces not overlapping (nowait). */
static void compute_rhs(void ) {
#pragma omp parallel
    {
        int i;
        int j;
        int k;
        int m;
        double aux;
        double rho_inv;
        double uijk;
        double up1;
        double um1;
        double vijk;
        double vp1;
        double vm1;
        double wijk;
        double wp1;
        double wm1;
        /* phase 1: point-wise auxiliaries on every grid point */
#pragma omp for nowait
        for (i = 0; i <= grid_points[0] - 1; i++) {
            for (j = 0; j <= grid_points[1] - 1; j++) {
                for (k = 0; k <= grid_points[2] - 1; k++) {
                    rho_inv = 1.0 / u[0][i][j][k];
                    rho_i[i][j][k] = rho_inv;
                    us[i][j][k] = u[1][i][j][k] * rho_inv;
                    vs[i][j][k] = u[2][i][j][k] * rho_inv;
                    ws[i][j][k] = u[3][i][j][k] * rho_inv;
                    square[i][j][k] = 0.5 * (u[1][i][j][k] * u[1][i][j][k] + u[2][i][j][k] * u[2][i][j][k] + u[3][i][j][k] * u[3][i][j][k]) * rho_inv;
                    qs[i][j][k] = square[i][j][k] * rho_inv;
                    /* speed of sound; aux reused for its square first */
                    aux = c1c2 * rho_inv * (u[4][i][j][k] - square[i][j][k]);
                    aux = sqrt(aux);
                    speed[i][j][k] = aux;
                    ainv[i][j][k] = 1.0 / aux;
                }
            }
        }
        /* phase 2: seed rhs with the forcing term */
        for (m = 0; m < 5; m++) {
#pragma omp for nowait
            for (i = 0; i <= grid_points[0] - 1; i++) {
                for (j = 0; j <= grid_points[1] - 1; j++) {
                    for (k = 0; k <= grid_points[2] - 1; k++) {
                        rhs[m][i][j][k] = forcing[m][i][j][k];
                    }
                }
            }
        }
        /* phase 3, x direction: central differences of convective and
         * viscous fluxes on interior points */
#pragma omp for nowait
        for (i = 1; i <= grid_points[0] - 2; i++) {
            for (j = 1; j <= grid_points[1] - 2; j++) {
                for (k = 1; k <= grid_points[2] - 2; k++) {
                    uijk = us[i][j][k];
                    up1 = us[i + 1][j][k];
                    um1 = us[i - 1][j][k];
                    rhs[0][i][j][k] = rhs[0][i][j][k] + dx1tx1 * (u[0][i + 1][j][k] - 2.0 * u[0][i][j][k] + u[0][i - 1][j][k]) - tx2 * (u[1][i + 1][j][k] - u[1][i - 1][j][k]);
                    rhs[1][i][j][k] = rhs[1][i][j][k] + dx2tx1 * (u[1][i + 1][j][k] - 2.0 * u[1][i][j][k] + u[1][i - 1][j][k]) + xxcon2 * con43 * (up1 - 2.0 * uijk + um1) - tx2 * (u[1][i + 1][j][k] * up1 - u[1][i - 1][j][k] * um1 + (u[4][i + 1][j][k] - square[i + 1][j][k] - u[4][i - 1][j][k] + square[i - 1][j][k]) * c2);
                    rhs[2][i][j][k] = rhs[2][i][j][k] + dx3tx1 * (u[2][i + 1][j][k] - 2.0 * u[2][i][j][k] + u[2][i - 1][j][k]) + xxcon2 * (vs[i + 1][j][k] - 2.0 * vs[i][j][k] + vs[i - 1][j][k]) - tx2 * (u[2][i + 1][j][k] * up1 - u[2][i - 1][j][k] * um1);
                    rhs[3][i][j][k] = rhs[3][i][j][k] + dx4tx1 * (u[3][i + 1][j][k] - 2.0 * u[3][i][j][k] + u[3][i - 1][j][k]) + xxcon2 * (ws[i + 1][j][k] - 2.0 * ws[i][j][k] + ws[i - 1][j][k]) - tx2 * (u[3][i + 1][j][k] * up1 - u[3][i - 1][j][k] * um1);
                    rhs[4][i][j][k] = rhs[4][i][j][k] + dx5tx1 * (u[4][i + 1][j][k] - 2.0 * u[4][i][j][k] + u[4][i - 1][j][k]) + xxcon3 * (qs[i + 1][j][k] - 2.0 * qs[i][j][k] + qs[i - 1][j][k]) + xxcon4 * (up1 * up1 - 2.0 * uijk * uijk + um1 * um1) + xxcon5 * (u[4][i + 1][j][k] * rho_i[i + 1][j][k] - 2.0 * u[4][i][j][k] * rho_i[i][j][k] + u[4][i - 1][j][k] * rho_i[i - 1][j][k]) - tx2 * ((c1 * u[4][i + 1][j][k] - c2 * square[i + 1][j][k]) * up1 - (c1 * u[4][i - 1][j][k] - c2 * square[i - 1][j][k]) * um1);
                }
            }
        }
        /* phase 4, x direction dissipation: one-sided stencils at i = 1 */
        i = 1;
        for (m = 0; m < 5; m++) {
#pragma omp for nowait
            for (j = 1; j <= grid_points[1] - 2; j++) {
                for (k = 1; k <= grid_points[2] - 2; k++) {
                    rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (5.0 * u[m][i][j][k] - 4.0 * u[m][i + 1][j][k] + u[m][i + 2][j][k]);
                }
            }
            /* barrier bookkeeping (auto-generated read/write sets elided) */
#pragma omp barrier
        }
        /* one-sided stencil at i = 2 */
        i = 2;
        for (m = 0; m < 5; m++) {
#pragma omp for nowait
            for (j = 1; j <= grid_points[1] - 2; j++) {
                for (k = 1; k <= grid_points[2] - 2; k++) {
                    rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (-4.0 * u[m][i - 1][j][k] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i + 1][j][k] + u[m][i + 2][j][k]);
                }
            }
            /* barrier bookkeeping (auto-generated read/write sets elided) */
#pragma omp barrier
        }
        /* full symmetric 5-point dissipation stencil on interior i */
        for (m = 0; m < 5; m++) {
#pragma omp for nowait
            for (i = 3 * 1; i <= grid_points[0] - 3 * 1 - 1; i++) {
                for (j = 1; j <= grid_points[1] - 2; j++) {
                    for (k = 1; k <= grid_points[2] - 2; k++) {
                        rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i - 2][j][k] - 4.0 * u[m][i - 1][j][k] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i + 1][j][k] + u[m][i + 2][j][k]);
                    }
                }
            }
            /* barrier bookkeeping (auto-generated read/write sets elided) */
#pragma omp barrier
        }
        /* one-sided stencils at the far x boundary (i = N-3, N-2) */
        i = grid_points[0] - 3;
        for (m = 0; m < 5; m++) {
#pragma omp for nowait
            for (j = 1; j <= grid_points[1] - 2; j++) {
                for (k = 1; k <= grid_points[2] - 2; k++) {
                    rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i - 2][j][k] - 4.0 * u[m][i - 1][j][k] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i + 1][j][k]);
                }
            }
            /* barrier bookkeeping (auto-generated read/write sets elided) */
#pragma omp barrier
        }
        i = grid_points[0] - 2;
        for (m = 0; m < 5; m++) {
#pragma omp for nowait
            for (j = 1; j <= grid_points[1] - 2; j++) {
                for (k = 1; k <= grid_points[2] - 2; k++) {
                    rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i - 2][j][k] - 4.0 * u[m][i - 1][j][k] + 5.0 * u[m][i][j][k]);
                }
            }
            /* barrier bookkeeping (auto-generated read/write sets elided) */
#pragma omp barrier
        }
        /* barrier separating the x and y phases (auto-generated sets elided) */
#pragma omp barrier
        /* phase 3, y direction */
#pragma omp for nowait
        for (i = 1; i <= grid_points[0] - 2; i++) {
            for (j = 1; j <= grid_points[1] - 2; j++) {
                for (k = 1; k <= grid_points[2] - 2; k++) {
                    vijk = vs[i][j][k];
                    vp1 = vs[i][j + 1][k];
                    vm1 = vs[i][j - 1][k];
                    rhs[0][i][j][k] = rhs[0][i][j][k] + dy1ty1 * (u[0][i][j + 1][k] - 2.0 * u[0][i][j][k] + u[0][i][j - 1][k]) - ty2 * (u[2][i][j + 1][k] - u[2][i][j - 1][k]);
                    rhs[1][i][j][k] = rhs[1][i][j][k] + dy2ty1 * (u[1][i][j + 1][k] - 2.0 * u[1][i][j][k] + u[1][i][j - 1][k]) + yycon2 * (us[i][j + 1][k] - 2.0 * us[i][j][k] + us[i][j - 1][k]) - ty2 * (u[1][i][j + 1][k] * vp1 - u[1][i][j - 1][k] * vm1);
                    rhs[2][i][j][k] = rhs[2][i][j][k] + dy3ty1 * (u[2][i][j + 1][k] - 2.0 * u[2][i][j][k] + u[2][i][j - 1][k]) + yycon2 * con43 * (vp1 - 2.0 * vijk + vm1) - ty2 * (u[2][i][j + 1][k] * vp1 - u[2][i][j - 1][k] * vm1 + (u[4][i][j + 1][k] - square[i][j + 1][k] - u[4][i][j - 1][k] + square[i][j - 1][k]) * c2);
                    rhs[3][i][j][k] = rhs[3][i][j][k] + dy4ty1 * (u[3][i][j + 1][k] - 2.0 * u[3][i][j][k] + u[3][i][j - 1][k]) + yycon2 * (ws[i][j + 1][k] - 2.0 * ws[i][j][k] + ws[i][j - 1][k]) - ty2 * (u[3][i][j + 1][k] * vp1 - u[3][i][j - 1][k] * vm1);
                    rhs[4][i][j][k] = rhs[4][i][j][k] + dy5ty1 * (u[4][i][j + 1][k] - 2.0 * u[4][i][j][k] + u[4][i][j - 1][k]) + yycon3 * (qs[i][j + 1][k] - 2.0 * qs[i][j][k] + qs[i][j - 1][k]) + yycon4 * (vp1 * vp1 - 2.0 * vijk * vijk + vm1 * vm1) + yycon5 * (u[4][i][j + 1][k] * rho_i[i][j + 1][k] - 2.0 * u[4][i][j][k] * rho_i[i][j][k] + u[4][i][j - 1][k] * rho_i[i][j - 1][k]) - ty2 * ((c1 * u[4][i][j + 1][k] - c2 * square[i][j + 1][k]) * vp1 - (c1 * u[4][i][j - 1][k] - c2 * square[i][j - 1][k]) * vm1);
                }
            }
        }
        /* phase 4, y direction dissipation (no barriers between sections;
         * their j ranges are disjoint) */
        j = 1;
        for (m = 0; m < 5; m++) {
#pragma omp for nowait
            for (i = 1; i <= grid_points[0] - 2; i++) {
                for (k = 1; k <= grid_points[2] - 2; k++) {
                    rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (5.0 * u[m][i][j][k] - 4.0 * u[m][i][j + 1][k] + u[m][i][j + 2][k]);
                }
            }
        }
        j = 2;
        for (m = 0; m < 5; m++) {
#pragma omp for nowait
            for (i = 1; i <= grid_points[0] - 2; i++) {
                for (k = 1; k <= grid_points[2] - 2; k++) {
                    rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (-4.0 * u[m][i][j - 1][k] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j + 1][k] + u[m][i][j + 2][k]);
                }
            }
        }
        for (m = 0; m < 5; m++) {
#pragma omp for nowait
            for (i = 1; i <= grid_points[0] - 2; i++) {
                for (j = 3 * 1; j <= grid_points[1] - 3 * 1 - 1; j++) {
                    for (k = 1; k <= grid_points[2] - 2; k++) {
                        rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i][j - 2][k] - 4.0 * u[m][i][j - 1][k] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j + 1][k] + u[m][i][j + 2][k]);
                    }
                }
            }
        }
        j = grid_points[1] - 3;
        for (m = 0; m < 5; m++) {
#pragma omp for nowait
            for (i = 1; i <= grid_points[0] - 2; i++) {
                for (k = 1; k <= grid_points[2] - 2; k++) {
                    rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i][j - 2][k] - 4.0 * u[m][i][j - 1][k] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j + 1][k]);
                }
            }
        }
        j = grid_points[1] - 2;
        for (m = 0; m < 5; m++) {
#pragma omp for nowait
            for (i = 1; i <= grid_points[0] - 2; i++) {
                for (k = 1; k <= grid_points[2] - 2; k++) {
                    rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i][j - 2][k] - 4.0 * u[m][i][j - 1][k] + 5.0 * u[m][i][j][k]);
                }
            }
        }
        /* phase 3, z direction */
#pragma omp for nowait
        for (i = 1; i <= grid_points[0] - 2; i++) {
            for (j = 1; j <= grid_points[1] - 2; j++) {
                for (k = 1; k <= grid_points[2] - 2; k++) {
                    wijk = ws[i][j][k];
                    wp1 = ws[i][j][k + 1];
                    wm1 = ws[i][j][k - 1];
                    rhs[0][i][j][k] = rhs[0][i][j][k] + dz1tz1 * (u[0][i][j][k + 1] - 2.0 * u[0][i][j][k] + u[0][i][j][k - 1]) - tz2 * (u[3][i][j][k + 1] - u[3][i][j][k - 1]);
                    rhs[1][i][j][k] = rhs[1][i][j][k] + dz2tz1 * (u[1][i][j][k + 1] - 2.0 * u[1][i][j][k] + u[1][i][j][k - 1]) + zzcon2 * (us[i][j][k + 1] - 2.0 * us[i][j][k] + us[i][j][k - 1]) - tz2 * (u[1][i][j][k + 1] * wp1 - u[1][i][j][k - 1] * wm1);
                    rhs[2][i][j][k] = rhs[2][i][j][k] + dz3tz1 * (u[2][i][j][k + 1] - 2.0 * u[2][i][j][k] + u[2][i][j][k - 1]) + zzcon2 * (vs[i][j][k + 1] - 2.0 * vs[i][j][k] + vs[i][j][k - 1]) - tz2 * (u[2][i][j][k + 1] * wp1 - u[2][i][j][k - 1] * wm1);
                    rhs[3][i][j][k] = rhs[3][i][j][k] + dz4tz1 * (u[3][i][j][k + 1] - 2.0 * u[3][i][j][k] + u[3][i][j][k - 1]) + zzcon2 * con43 * (wp1 - 2.0 * wijk + wm1) - tz2 * (u[3][i][j][k + 1] * wp1 - u[3][i][j][k - 1] * wm1 + (u[4][i][j][k + 1] - square[i][j][k + 1] - u[4][i][j][k - 1] + square[i][j][k - 1]) * c2);
                    rhs[4][i][j][k] = rhs[4][i][j][k] + dz5tz1 * (u[4][i][j][k + 1] - 2.0 * u[4][i][j][k] + u[4][i][j][k - 1]) + zzcon3 * (qs[i][j][k + 1] - 2.0 * qs[i][j][k] + qs[i][j][k - 1]) + zzcon4 * (wp1 * wp1 - 2.0 * wijk * wijk + wm1 * wm1) + zzcon5 * (u[4][i][j][k + 1] * rho_i[i][j][k + 1] - 2.0 * u[4][i][j][k] * rho_i[i][j][k] + u[4][i][j][k - 1] * rho_i[i][j][k - 1]) - tz2 * ((c1 * u[4][i][j][k + 1] - c2 * square[i][j][k + 1]) * wp1 - (c1 * u[4][i][j][k - 1] - c2 * square[i][j][k - 1]) * wm1);
                }
            }
        }
        /* phase 4, z direction dissipation */
        k = 1;
        for (m = 0; m < 5; m++) {
#pragma omp for nowait
            for (i = 1; i <= grid_points[0] - 2; i++) {
                for (j = 1; j <= grid_points[1] - 2; j++) {
                    rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (5.0 * u[m][i][j][k] - 4.0 * u[m][i][j][k + 1] + u[m][i][j][k + 2]);
                }
            }
        }
        k = 2;
        for (m = 0; m < 5; m++) {
#pragma omp for nowait
            for (i = 1; i <= grid_points[0] - 2; i++) {
                for (j = 1; j <= grid_points[1] - 2; j++) {
                    rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (-4.0 * u[m][i][j][k - 1] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j][k + 1] + u[m][i][j][k + 2]);
                }
            }
        }
        for (m = 0; m < 5; m++) {
#pragma omp for nowait
            for (i = 1; i <= grid_points[0] - 2; i++) {
                for (j = 1; j <= grid_points[1] - 2; j++) {
                    for (k = 3 * 1; k <= grid_points[2] - 3 * 1 - 1; k++) {
                        rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i][j][k - 2] - 4.0 * u[m][i][j][k - 1] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j][k + 1] + u[m][i][j][k + 2]);
                    }
                }
            }
        }
        k = grid_points[2] - 3;
        for (m = 0; m < 5; m++) {
#pragma omp for nowait
            for (i = 1; i <= grid_points[0] - 2; i++) {
                for (j = 1; j <= grid_points[1] - 2; j++) {
                    rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i][j][k - 2] - 4.0 * u[m][i][j][k - 1] + 6.0 * u[m][i][j][k] - 4.0 * u[m][i][j][k + 1]);
                }
            }
        }
        k = grid_points[2] - 2;
        for (m = 0; m < 5; m++) {
#pragma omp for nowait
            for (i = 1; i <= grid_points[0] - 2; i++) {
                for (j = 1; j <= grid_points[1] - 2; j++) {
                    rhs[m][i][j][k] = rhs[m][i][j][k] - dssp * (u[m][i][j][k - 2] - 4.0 * u[m][i][j][k - 1] + 5.0 * u[m][i][j][k]);
                }
            }
        }
        /* phase 5: scale the assembled rhs by the time step */
        for (m = 0; m < 5; m++) {
#pragma omp for nowait
            for (i = 1; i <= grid_points[0] - 2; i++) {
                for (j = 1; j <= grid_points[1] - 2; j++) {
                    for (k = 1; k
<= grid_points[2] - 2; k++) {
                        rhs[m][i][j][k] = rhs[m][i][j][k] * dt;
                    }
                }
            }
        }
    }
}
/* set_constants: initialize the coefficient table ce[13][5] used by the
 * exact-solution polynomial, the physical constants c1..c5, the grid
 * metrics (tx*, ty*, tz*), the per-direction diffusion coefficients
 * (dx*, dy*, dz*), and every derived product used by the solvers.
 * Pure straight-line initialization of file-scope globals; must run before
 * any solver routine. */
static void set_constants(void ) {
    /* ce[term][component]: coefficients of the exact solution, one column
     * per conserved variable */
    ce[0][0] = 2.0;
    ce[1][0] = 0.0;
    ce[2][0] = 0.0;
    ce[3][0] = 4.0;
    ce[4][0] = 5.0;
    ce[5][0] = 3.0;
    ce[6][0] = 0.5;
    ce[7][0] = 0.02;
    ce[8][0] = 0.01;
    ce[9][0] = 0.03;
    ce[10][0] = 0.5;
    ce[11][0] = 0.4;
    ce[12][0] = 0.3;
    ce[0][1] = 1.0;
    ce[1][1] = 0.0;
    ce[2][1] = 0.0;
    ce[3][1] = 0.0;
    ce[4][1] = 1.0;
    ce[5][1] = 2.0;
    ce[6][1] = 3.0;
    ce[7][1] = 0.01;
    ce[8][1] = 0.03;
    ce[9][1] = 0.02;
    ce[10][1] = 0.4;
    ce[11][1] = 0.3;
    ce[12][1] = 0.5;
    ce[0][2] = 2.0;
    ce[1][2] = 2.0;
    ce[2][2] = 0.0;
    ce[3][2] = 0.0;
    ce[4][2] = 0.0;
    ce[5][2] = 2.0;
    ce[6][2] = 3.0;
    ce[7][2] = 0.04;
    ce[8][2] = 0.03;
    ce[9][2] = 0.05;
    ce[10][2] = 0.3;
    ce[11][2] = 0.5;
    ce[12][2] = 0.4;
    ce[0][3] = 2.0;
    ce[1][3] = 2.0;
    ce[2][3] = 0.0;
    ce[3][3] = 0.0;
    ce[4][3] = 0.0;
    ce[5][3] = 2.0;
    ce[6][3] = 3.0;
    ce[7][3] = 0.03;
    ce[8][3] = 0.05;
    ce[9][3] = 0.04;
    ce[10][3] = 0.2;
    ce[11][3] = 0.1;
    ce[12][3] = 0.3;
    ce[0][4] = 5.0;
    ce[1][4] = 4.0;
    ce[2][4] = 3.0;
    ce[3][4] = 2.0;
    ce[4][4] = 0.1;
    ce[5][4] = 0.4;
    ce[6][4] = 0.3;
    ce[7][4] = 0.05;
    ce[8][4] = 0.04;
    ce[9][4] = 0.03;
    ce[10][4] = 0.1;
    ce[11][4] = 0.3;
    ce[12][4] = 0.2;
    /* physical constants; c1 is the ratio of specific heats */
    c1 = 1.4;
    c2 = 0.4;
    c3 = 0.1;
    c4 = 1.0;
    c5 = 1.4;
    bt = sqrt(0.5);
    /* inverse mesh spacings */
    dnxm1 = 1.0 / (double) (grid_points[0] - 1);
    dnym1 = 1.0 / (double) (grid_points[1] - 1);
    dnzm1 = 1.0 / (double) (grid_points[2] - 1);
    c1c2 = c1 * c2;
    c1c5 = c1 * c5;
    c3c4 = c3 * c4;
    c1345 = c1c5 * c3c4;
    conz1 = (1.0 - c1c5);
    tx1 = 1.0 / (dnxm1 * dnxm1);
    tx2 = 1.0 / (2.0 * dnxm1);
    tx3 = 1.0 / dnxm1;
    ty1 = 1.0 / (dnym1 * dnym1);
    ty2 = 1.0 / (2.0 * dnym1);
    ty3 = 1.0 / dnym1;
    tz1 = 1.0 / (dnzm1 * dnzm1);
    tz2 = 1.0 / (2.0 * dnzm1);
    tz3 = 1.0 / dnzm1;
    /* per-direction diffusion coefficients */
    dx1 = 0.75;
    dx2 = 0.75;
    dx3 = 0.75;
    dx4 = 0.75;
    dx5 = 0.75;
    dy1 = 0.75;
    dy2 = 0.75;
    dy3 = 0.75;
    dy4 = 0.75;
    dy5 = 0.75;
    dz1 = 1.0;
    dz2 = 1.0;
    dz3 = 1.0;
    dz4 = 1.0;
    dz5 = 1.0;
    /* dxmax = max(dx3, dx4) — expanded by the preprocessing tool */
    int _imopVarPre2095;
    double _imopVarPre2096;
    _imopVarPre2095 = (dx3 > dx4);
    if (_imopVarPre2095) {
        _imopVarPre2096 = dx3;
    } else {
        _imopVarPre2096 = dx4;
    }
    dxmax = _imopVarPre2096;
    /* dymax = max(dy2, dy4) */
    int _imopVarPre2099;
    double _imopVarPre2100;
    _imopVarPre2099 = (dy2 > dy4);
    if (_imopVarPre2099) {
        _imopVarPre2100 = dy2;
    } else {
        _imopVarPre2100 = dy4;
    }
    dymax = _imopVarPre2100;
    /* dzmax = max(dz2, dz3) */
    int _imopVarPre2103;
    double _imopVarPre2104;
    _imopVarPre2103 = (dz2 > dz3);
    if (_imopVarPre2103) {
        _imopVarPre2104 = dz2;
    } else {
        _imopVarPre2104 = dz3;
    }
    dzmax = _imopVarPre2104;
    /* dssp = 0.25 * max(dx1, max(dy1, dz1)); the inner max is recomputed
     * in the else branch because the macro expansion duplicated it */
    int _imopVarPre2145;
    double _imopVarPre2146;
    int _imopVarPre2147;
    double _imopVarPre2148;
    int _imopVarPre2155;
    double _imopVarPre2156;
    _imopVarPre2145 = (dy1 > dz1);
    if (_imopVarPre2145) {
        _imopVarPre2146 = dy1;
    } else {
        _imopVarPre2146 = dz1;
    }
    _imopVarPre2147 = (dx1 > _imopVarPre2146);
    if (_imopVarPre2147) {
        _imopVarPre2148 = dx1;
    } else {
        _imopVarPre2155 = (dy1 > dz1);
        if (_imopVarPre2155) {
            _imopVarPre2156 = dy1;
        } else {
            _imopVarPre2156 = dz1;
        }
        _imopVarPre2148 = _imopVarPre2156;
    }
    dssp = 0.25 * _imopVarPre2148;
    c4dssp = 4.0 * dssp;
    c5dssp = 5.0 * dssp;
    /* time-step-scaled metrics */
    dttx1 = dt * tx1;
    dttx2 = dt * tx2;
    dtty1 = dt * ty1;
    dtty2 = dt * ty2;
    dttz1 = dt * tz1;
    dttz2 = dt * tz2;
    c2dttx1 = 2.0 * dttx1;
    c2dtty1 = 2.0 * dtty1;
    c2dttz1 = 2.0 * dttz1;
    dtdssp = dt * dssp;
    /* dissipation weights used on the LHS boundary stencils */
    comz1 = dtdssp;
    comz4 = 4.0 * dtdssp;
    comz5 = 5.0 * dtdssp;
    comz6 = 6.0 * dtdssp;
    c3c4tx3 = c3c4 * tx3;
    c3c4ty3 = c3c4 * ty3;
    c3c4tz3 = c3c4 * tz3;
    dx1tx1 = dx1 * tx1;
    dx2tx1 = dx2 * tx1;
    dx3tx1 = dx3 * tx1;
    dx4tx1 = dx4 * tx1;
    dx5tx1 = dx5 * tx1;
    dy1ty1 = dy1 * ty1;
    dy2ty1 = dy2 * ty1;
    dy3ty1 = dy3 * ty1;
    dy4ty1 = dy4 * ty1;
    dy5ty1 = dy5 * ty1;
    dz1tz1 = dz1 * tz1;
    dz2tz1 = dz2 * tz1;
    dz3tz1 = dz3 * tz1;
    dz4tz1 = dz4 * tz1;
    dz5tz1 = dz5 * tz1;
    c2iv = 2.5;
    con43 = 4.0 / 3.0;
    con16 = 1.0 / 6.0;
    /* viscous-term coefficients, x direction */
    xxcon1 = c3c4tx3 * con43 * tx3;
    xxcon2 = c3c4tx3 * tx3;
    xxcon3 = c3c4tx3 * conz1 * tx3;
    xxcon4 = c3c4tx3 * con16 * tx3;
    xxcon5 = c3c4tx3 * c1c5 * tx3;
    /* viscous-term coefficients, y direction */
    yycon1 = c3c4ty3 * con43 * ty3;
    yycon2 = c3c4ty3 * ty3;
    yycon3 = c3c4ty3 * conz1 * ty3;
    yycon4 = c3c4ty3 * con16 * ty3;
    yycon5 = c3c4ty3 * c1c5 * ty3;
    /* viscous-term coefficients, z direction (continues in next chunk) */
    zzcon1 =
c3c4tz3 * con43 * tz3;
    zzcon2 = c3c4tz3 * tz3;
    zzcon3 = c3c4tz3 * conz1 * tz3;
    zzcon4 = c3c4tz3 * con16 * tz3;
    zzcon5 = c3c4tz3 * c1c5 * tz3;
}
/* txinvr: point-wise multiplication of rhs by the inverse of T, the matrix
 * of conservative-to-characteristic variables, before the x sweep.
 * NOTE(review): only a bare `omp for` here — this routine is expected to be
 * called from inside an enclosing parallel region; the locals are private
 * by virtue of being declared inside the function. */
static void txinvr(void ) {
    int i;
    int j;
    int k;
    double t1;
    double t2;
    double t3;
    double ac;
    double ru1;
    double uu;
    double vv;
    double ww;
    double r1;
    double r2;
    double r3;
    double r4;
    double r5;
    double ac2inv;
#pragma omp for nowait
    for (i = 1; i <= grid_points[0] - 2; i++) {
        for (j = 1; j <= grid_points[1] - 2; j++) {
            for (k = 1; k <= grid_points[2] - 2; k++) {
                ru1 = rho_i[i][j][k];
                uu = us[i][j][k];
                vv = vs[i][j][k];
                ww = ws[i][j][k];
                ac = speed[i][j][k];
                ac2inv = ainv[i][j][k] * ainv[i][j][k];
                r1 = rhs[0][i][j][k];
                r2 = rhs[1][i][j][k];
                r3 = rhs[2][i][j][k];
                r4 = rhs[3][i][j][k];
                r5 = rhs[4][i][j][k];
                t1 = c2 * ac2inv * (qs[i][j][k] * r1 - uu * r2 - vv * r3 - ww * r4 + r5);
                t2 = bt * ru1 * (uu * r1 - r2);
                t3 = (bt * ru1 * ac) * t1;
                rhs[0][i][j][k] = r1 - t1;
                rhs[1][i][j][k] = -ru1 * (ww * r1 - r4);
                rhs[2][i][j][k] = ru1 * (vv * r1 - r3);
                rhs[3][i][j][k] = -t2 + t3;
                rhs[4][i][j][k] = t2 + t3;
            }
        }
    }
}
/* tzetar: point-wise multiplication of rhs by T-zeta after the z sweep.
 * NOTE(review): `acinv` is declared but is NOT listed in the private()
 * clause below, unlike every other scratch variable — it is therefore
 * shared across threads and looks racy; confirm against the reference
 * NPB SP source. */
static void tzetar(void ) {
    int i;
    int j;
    int k;
    double t1;
    double t2;
    double t3;
    double ac;
    double xvel;
    double yvel;
    double zvel;
    double r1;
    double r2;
    double r3;
    double r4;
    double r5;
    double btuz;
    double acinv;
    double ac2u;
    double uzik1;
#pragma omp for private(i, j, k, t1, t2, t3, ac, xvel, yvel, zvel, r1, r2, r3, r4, r5, btuz, ac2u, uzik1) nowait
    for (i = 1; i <= grid_points[0] - 2; i++) {
        for (j = 1; j <= grid_points[1] - 2; j++) {
            for (k = 1; k <= grid_points[2] - 2; k++) {
                xvel = us[i][j][k];
                yvel = vs[i][j][k];
                zvel = ws[i][j][k];
                ac = speed[i][j][k];
                acinv = ainv[i][j][k];
                ac2u = ac * ac;
                r1 = rhs[0][i][j][k];
                r2 = rhs[1][i][j][k];
                r3 = rhs[2][i][j][k];
                r4 = rhs[3][i][j][k];
                r5 = rhs[4][i][j][k];
                uzik1 = u[0][i][j][k];
                btuz = bt * uzik1;
                t1 = btuz * acinv * (r4 + r5);
                t2 = r3 + t1;
                t3 = btuz * (r4 - r5);
                rhs[0][i][j][k] = t2;
                rhs[1][i][j][k] = -uzik1 * r2 + xvel * t2;
                rhs[2][i][j][k] = uzik1 * r1 + yvel * t2;
rhs[3][i][j][k] = zvel * t2 + t3;
                rhs[4][i][j][k] = uzik1 * (-xvel * r2 + yvel * r1) + qs[i][j][k] * t2 + c2iv * ac2u * t1 + zvel * t3;
            }
        }
    }
}
/* verify: compare the residual (xcr) and solution-error (xce) RMS norms
 * against hard-coded reference values for the recognized problem classes
 * (S/W/A/B/C, keyed on grid size + iteration count).  Sets *class to the
 * matched class letter ('U' if unknown) and *verified to 0 on any
 * mismatch beyond epsilon.  Prints a full report to stdout. */
static void verify(int no_time_steps, char *class , boolean *verified) {
    double xcrref[5];
    double xceref[5];
    double xcrdif[5];
    double xcedif[5];
    double epsilon;
    double xce[5];
    double xcr[5];
    double dtref;
    int m;
    /* relative tolerance for all comparisons */
    epsilon = 1.0e-08;
    error_norm(xce);
    compute_rhs();
    rhs_norm(xcr);
    /* residual norms are compared per unit time step */
    for (m = 0; m < 5; m++) {
        xcr[m] = xcr[m] / dt;
    }
    *class = 'U';
    *verified = 1;
    for (m = 0; m < 5; m++) {
        xcrref[m] = 1.0;
        xceref[m] = 1.0;
    }
    /* class S: 12^3 grid, 100 steps (nested ifs are the tool's expansion of
     * a short-circuit &&) */
    int _imopVarPre2160;
    int _imopVarPre2161;
    int _imopVarPre2162;
    _imopVarPre2160 = grid_points[0] == 12;
    if (_imopVarPre2160) {
        _imopVarPre2161 = grid_points[1] == 12;
        if (_imopVarPre2161) {
            _imopVarPre2162 = grid_points[2] == 12;
            if (_imopVarPre2162) {
                _imopVarPre2162 = no_time_steps == 100;
            }
            _imopVarPre2161 = _imopVarPre2162;
        }
        _imopVarPre2160 = _imopVarPre2161;
    }
    if (_imopVarPre2160) {
        *class = 'S';
        dtref = 1.5e-2;
        xcrref[0] = 2.7470315451339479e-02;
        xcrref[1] = 1.0360746705285417e-02;
        xcrref[2] = 1.6235745065095532e-02;
        xcrref[3] = 1.5840557224455615e-02;
        xcrref[4] = 3.4849040609362460e-02;
        xceref[0] = 2.7289258557377227e-05;
        xceref[1] = 1.0364446640837285e-05;
        xceref[2] = 1.6154798287166471e-05;
        xceref[3] = 1.5750704994480102e-05;
        xceref[4] = 3.4177666183390531e-05;
    } else {
        /* class W: 36^3 grid, 400 steps */
        int _imopVarPre2166;
        int _imopVarPre2167;
        int _imopVarPre2168;
        _imopVarPre2166 = grid_points[0] == 36;
        if (_imopVarPre2166) {
            _imopVarPre2167 = grid_points[1] == 36;
            if (_imopVarPre2167) {
                _imopVarPre2168 = grid_points[2] == 36;
                if (_imopVarPre2168) {
                    _imopVarPre2168 = no_time_steps == 400;
                }
                _imopVarPre2167 = _imopVarPre2168;
            }
            _imopVarPre2166 = _imopVarPre2167;
        }
        if (_imopVarPre2166) {
            *class = 'W';
            dtref = 1.5e-3;
            xcrref[0] = 0.1893253733584e-02;
            xcrref[1] = 0.1717075447775e-03;
            xcrref[2] = 0.2778153350936e-03;
            xcrref[3] = 0.2887475409984e-03;
            xcrref[4] = 0.3143611161242e-02;
            xceref[0] = 0.7542088599534e-04;
            xceref[1] = 0.6512852253086e-05;
            xceref[2] = 0.1049092285688e-04;
            xceref[3] = 0.1128838671535e-04;
            xceref[4] = 0.1212845639773e-03;
        } else {
            /* class A: 64^3 grid, 400 steps */
            int _imopVarPre2172;
            int _imopVarPre2173;
            int _imopVarPre2174;
            _imopVarPre2172 = grid_points[0] == 64;
            if (_imopVarPre2172) {
                _imopVarPre2173 = grid_points[1] == 64;
                if (_imopVarPre2173) {
                    _imopVarPre2174 = grid_points[2] == 64;
                    if (_imopVarPre2174) {
                        _imopVarPre2174 = no_time_steps == 400;
                    }
                    _imopVarPre2173 = _imopVarPre2174;
                }
                _imopVarPre2172 = _imopVarPre2173;
            }
            if (_imopVarPre2172) {
                *class = 'A';
                dtref = 1.5e-3;
                xcrref[0] = 2.4799822399300195;
                xcrref[1] = 1.1276337964368832;
                xcrref[2] = 1.5028977888770491;
                xcrref[3] = 1.4217816211695179;
                xcrref[4] = 2.1292113035138280;
                xceref[0] = 1.0900140297820550e-04;
                xceref[1] = 3.7343951769282091e-05;
                xceref[2] = 5.0092785406541633e-05;
                xceref[3] = 4.7671093939528255e-05;
                xceref[4] = 1.3621613399213001e-04;
            } else {
                /* class B: 102^3 grid, 400 steps */
                int _imopVarPre2178;
                int _imopVarPre2179;
                int _imopVarPre2180;
                _imopVarPre2178 = grid_points[0] == 102;
                if (_imopVarPre2178) {
                    _imopVarPre2179 = grid_points[1] == 102;
                    if (_imopVarPre2179) {
                        _imopVarPre2180 = grid_points[2] == 102;
                        if (_imopVarPre2180) {
                            _imopVarPre2180 = no_time_steps == 400;
                        }
                        _imopVarPre2179 = _imopVarPre2180;
                    }
                    _imopVarPre2178 = _imopVarPre2179;
                }
                if (_imopVarPre2178) {
                    *class = 'B';
                    dtref = 1.0e-3;
                    xcrref[0] = 0.6903293579998e+02;
                    xcrref[1] = 0.3095134488084e+02;
                    xcrref[2] = 0.4103336647017e+02;
                    xcrref[3] = 0.3864769009604e+02;
                    xcrref[4] = 0.5643482272596e+02;
                    xceref[0] = 0.9810006190188e-02;
                    xceref[1] = 0.1022827905670e-02;
                    xceref[2] = 0.1720597911692e-02;
                    xceref[3] = 0.1694479428231e-02;
                    xceref[4] = 0.1847456263981e-01;
                } else {
                    /* class C: 162^3 grid, 400 steps */
                    int _imopVarPre2184;
                    int _imopVarPre2185;
                    int _imopVarPre2186;
                    _imopVarPre2184 = grid_points[0] == 162;
                    if (_imopVarPre2184) {
                        _imopVarPre2185 = grid_points[1] == 162;
                        if (_imopVarPre2185) {
                            _imopVarPre2186 = grid_points[2] == 162;
                            if (_imopVarPre2186) {
                                _imopVarPre2186 = no_time_steps == 400;
                            }
                            _imopVarPre2185 = _imopVarPre2186;
                        }
                        _imopVarPre2184 = _imopVarPre2185;
                    }
                    if (_imopVarPre2184) {
                        *class = 'C';
                        dtref = 0.67e-3;
                        xcrref[0] = 0.5881691581829e+03;
                        xcrref[1] = 0.2454417603569e+03;
                        xcrref[2] = 0.3293829191851e+03;
                        xcrref[3] = 0.3081924971891e+03;
                        xcrref[4] = 0.4597223799176e+03;
                        xceref[0] = 0.2598120500183e+00;
                        xceref[1] = 0.2590888922315e-01;
                        xceref[2] = 0.5132886416320e-01;
                        xceref[3] = 0.4806073419454e-01;
                        xceref[4] = 0.5483377491301e+00;
                    } else {
                        /* no class matched */
                        *verified = 0;
                    }
                }
            }
        }
    }
    /* relative differences versus the selected references */
    for (m = 0; m < 5; m++) {
        double _imopVarPre2188;
        double _imopVarPre2189;
        _imopVarPre2188 = (xcr[m] - xcrref[m]) / xcrref[m];
        _imopVarPre2189 = fabs(_imopVarPre2188);
        xcrdif[m] = _imopVarPre2189;
        double _imopVarPre2191;
        double _imopVarPre2192;
        _imopVarPre2191 = (xce[m] - xceref[m]) / xceref[m];
        _imopVarPre2192 = fabs(_imopVarPre2191);
        xcedif[m] = _imopVarPre2192;
    }
    if (*class != 'U') {
        char _imopVarPre2194;
        _imopVarPre2194 = *class;
        printf(" Verification being performed for class %1c\n", _imopVarPre2194);
        printf(" accuracy setting for epsilon = %20.13e\n", epsilon);
        /* the time step must also match the class reference */
        double _imopVarPre2197;
        double _imopVarPre2198;
        _imopVarPre2197 = dt - dtref;
        _imopVarPre2198 = fabs(_imopVarPre2197);
        if (_imopVarPre2198 > epsilon) {
            *verified = 0;
            *class = 'U';
            printf(" DT does not match the reference value of %15.8e\n", dtref);
        }
    } else {
        printf(" Unknown class\n");
    }
    if (*class != 'U') {
        printf(" Comparison of RMS-norms of residual\n");
    } else {
        printf(" RMS-norms of residual\n");
    }
    for (m = 0; m < 5; m++) {
        if (*class == 'U') {
            double _imopVarPre2200;
            _imopVarPre2200 = xcr[m];
            printf(" %2d%20.13e\n", m, _imopVarPre2200);
        } else {
            if (xcrdif[m] > epsilon) {
                *verified = 0;
                double _imopVarPre2204;
                double _imopVarPre2205;
                double _imopVarPre2206;
                _imopVarPre2204 = xcrdif[m];
                _imopVarPre2205 = xcrref[m];
                _imopVarPre2206 = xcr[m];
                printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n", m, _imopVarPre2206, _imopVarPre2205, _imopVarPre2204);
            } else {
                double _imopVarPre2210;
                double _imopVarPre2211;
                double _imopVarPre2212;
                _imopVarPre2210 = xcrdif[m];
                _imopVarPre2211 = xcrref[m];
                _imopVarPre2212 = xcr[m];
                printf(" %2d%20.13e%20.13e%20.13e\n", m, _imopVarPre2212, _imopVarPre2211, _imopVarPre2210);
            }
        }
    }
    if (*class != 'U') {
        printf(" Comparison of RMS-norms of solution error\n");
    } else {
        printf(" RMS-norms of solution error\n");
    }
    for (m = 0; m < 5; m++) {
        if (*class == 'U') {
            double _imopVarPre2214;
            _imopVarPre2214 = xce[m];
            printf(" %2d%20.13e\n", m, _imopVarPre2214);
        } else {
            if (xcedif[m] > epsilon) {
                *verified = 0;
                double _imopVarPre2218;
                double _imopVarPre2219;
                double _imopVarPre2220;
                _imopVarPre2218 = xcedif[m];
                _imopVarPre2219 = xceref[m];
                _imopVarPre2220 = xce[m];
                printf(" FAILURE: %2d%20.13e%20.13e%20.13e\n", m, _imopVarPre2220, _imopVarPre2219, _imopVarPre2218);
            } else {
                double _imopVarPre2224;
                double _imopVarPre2225;
                double _imopVarPre2226;
                _imopVarPre2224 = xcedif[m];
                _imopVarPre2225 = xceref[m];
                _imopVarPre2226 = xce[m];
                printf(" %2d%20.13e%20.13e%20.13e\n", m, _imopVarPre2226, _imopVarPre2225, _imopVarPre2224);
            }
        }
    }
    if (*class == 'U') {
        printf(" No reference values provided\n");
        printf(" No verification performed\n");
    } else {
        if (*verified) {
            printf(" Verification Successful\n");
        } else {
            printf(" Verification failed\n");
        }
    }
}
/* x_solve: forward-eliminate and back-substitute the x-direction scalar
 * pentadiagonal systems (Thomas algorithm); body continues past this
 * chunk. */
static void x_solve(void ) {
#pragma omp parallel
    {
        int i;
        int j;
        int k;
        int n;
        int i1;
        int i2;
        int m;
        double fac1;
        double fac2;
        lhsx();
        /* first factor the n = 0 (non-acoustic) system */
        n = 0;
        for (i = 0; i <= grid_points[0] - 3; i++) {
            i1 = i + 1;
            i2 = i + 2;
#pragma omp for nowait
            for (j = 1; j <= grid_points[1] - 2; j++) {
                for (k = 1; k <= grid_points[2] - 2; k++) {
                    fac1 = 1.
/ lhs[n + 2][i][j][k]; lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k]; lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i][j][k] = fac1 * rhs[m][i][j][k]; } lhs[n + 2][i1][j][k] = lhs[n + 2][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 3][i][j][k]; lhs[n + 3][i1][j][k] = lhs[n + 3][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i1][j][k] = rhs[m][i1][j][k] - lhs[n + 1][i1][j][k] * rhs[m][i][j][k]; } lhs[n + 1][i2][j][k] = lhs[n + 1][i2][j][k] - lhs[n + 0][i2][j][k] * lhs[n + 3][i][j][k]; lhs[n + 2][i2][j][k] = lhs[n + 2][i2][j][k] - lhs[n + 0][i2][j][k] * lhs[n + 4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i2][j][k] = rhs[m][i2][j][k] - lhs[n + 0][i2][j][k] * rhs[m][i][j][k]; } } } // #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f]) read([rhs.f, rhs, lhs.f, j, grid_points.f, grid_points, lhs]) #pragma omp barrier } i = grid_points[0] - 2; i1 = grid_points[0] - 1; #pragma omp for nowait for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { fac1 = 1.0 / lhs[n + 2][i][j][k]; lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k]; lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i][j][k] = fac1 * rhs[m][i][j][k]; } lhs[n + 2][i1][j][k] = lhs[n + 2][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 3][i][j][k]; lhs[n + 3][i1][j][k] = lhs[n + 3][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i1][j][k] = rhs[m][i1][j][k] - lhs[n + 1][i1][j][k] * rhs[m][i][j][k]; } fac2 = 1. 
/ lhs[n + 2][i1][j][k]; for (m = 0; m < 3; m++) { rhs[m][i1][j][k] = fac2 * rhs[m][i1][j][k]; } } } // #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f]) read([rhs.f, comz1, comz5, dtty1, rhoq.f, j, dy1, c1c5, speed, pinvr, dy5, _imopVarPre1359, _imopVarPre1347, m, rhs, dymax, comz4, con43, grid_points, rhoq, i, c3c4, _imopVarPre1453, rho_i.f, speed.f, i, rho_i, i, lhs.f, grid_points.f, vs, dy3, _imopVarPre1349, _imopVarPre1357, cv, bt, comz6, i, ninvr, dtty2, lhs, c2dtty1, lhsy, y_solve, vs.f, _imopVarPre1455, j, _imopVarPre1463, cv.f]) #pragma omp barrier for (m = 3; m < 5; m++) { n = (m - 3 + 1) * 5; for (i = 0; i <= grid_points[0] - 3; i++) { i1 = i + 1; i2 = i + 2; #pragma omp for nowait for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { fac1 = 1. / lhs[n + 2][i][j][k]; lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k]; lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k]; rhs[m][i][j][k] = fac1 * rhs[m][i][j][k]; lhs[n + 2][i1][j][k] = lhs[n + 2][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 3][i][j][k]; lhs[n + 3][i1][j][k] = lhs[n + 3][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 4][i][j][k]; rhs[m][i1][j][k] = rhs[m][i1][j][k] - lhs[n + 1][i1][j][k] * rhs[m][i][j][k]; lhs[n + 1][i2][j][k] = lhs[n + 1][i2][j][k] - lhs[n + 0][i2][j][k] * lhs[n + 3][i][j][k]; lhs[n + 2][i2][j][k] = lhs[n + 2][i2][j][k] - lhs[n + 0][i2][j][k] * lhs[n + 4][i][j][k]; rhs[m][i2][j][k] = rhs[m][i2][j][k] - lhs[n + 0][i2][j][k] * rhs[m][i][j][k]; } } // #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f]) read([rhs.f, rhs, lhs.f, j, grid_points.f, grid_points, lhs]) #pragma omp barrier } i = grid_points[0] - 2; i1 = grid_points[0] - 1; #pragma omp for nowait for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { fac1 = 1. 
/ lhs[n + 2][i][j][k]; lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k]; lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k]; rhs[m][i][j][k] = fac1 * rhs[m][i][j][k]; lhs[n + 2][i1][j][k] = lhs[n + 2][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 3][i][j][k]; lhs[n + 3][i1][j][k] = lhs[n + 3][i1][j][k] - lhs[n + 1][i1][j][k] * lhs[n + 4][i][j][k]; rhs[m][i1][j][k] = rhs[m][i1][j][k] - lhs[n + 1][i1][j][k] * rhs[m][i][j][k]; fac2 = 1. / lhs[n + 2][i1][j][k]; rhs[m][i1][j][k] = fac2 * rhs[m][i1][j][k]; } } // #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f]) read([rhs.f, comz1, comz5, dtty1, rhoq.f, j, dy1, c1c5, speed, pinvr, dy5, _imopVarPre1359, _imopVarPre1347, m, rhs, dymax, comz4, con43, grid_points, rhoq, i, c3c4, _imopVarPre1453, rho_i.f, speed.f, i, rho_i, i, lhs.f, grid_points.f, vs, dy3, _imopVarPre1349, _imopVarPre1357, cv, bt, comz6, i, ninvr, dtty2, lhs, c2dtty1, lhsy, y_solve, vs.f, _imopVarPre1455, j, _imopVarPre1463, cv.f]) #pragma omp barrier } i = grid_points[0] - 2; i1 = grid_points[0] - 1; n = 0; for (m = 0; m < 3; m++) { #pragma omp for nowait for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i1][j][k]; } } // #pragma omp dummyFlush BARRIER_START written([rhs.f]) read([rhs.f, comz1, comz5, dtty1, rhoq.f, j, dy1, c1c5, speed, pinvr, dy5, _imopVarPre1359, _imopVarPre1347, m, rhs, dymax, comz4, con43, grid_points, rhoq, i, c3c4, _imopVarPre1453, rho_i.f, speed.f, i, rho_i, i, lhs.f, grid_points.f, vs, dy3, _imopVarPre1349, _imopVarPre1357, cv, bt, comz6, i, ninvr, dtty2, lhs, c2dtty1, lhsy, y_solve, vs.f, _imopVarPre1455, j, _imopVarPre1463, cv.f]) #pragma omp barrier } for (m = 3; m < 5; m++) { #pragma omp for nowait for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { n = (m - 3 + 1) * 5; rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i1][j][k]; } } // #pragma omp dummyFlush 
BARRIER_START written([rhs.f]) read([rhs.f, comz1, comz5, dtty1, rhoq.f, j, dy1, c1c5, speed, pinvr, dy5, _imopVarPre1359, _imopVarPre1347, m, rhs, dymax, comz4, con43, grid_points, rhoq, i, c3c4, _imopVarPre1453, rho_i.f, speed.f, i, rho_i, i, lhs.f, grid_points.f, vs, dy3, _imopVarPre1349, _imopVarPre1357, cv, bt, comz6, i, ninvr, dtty2, lhs, c2dtty1, lhsy, y_solve, vs.f, _imopVarPre1455, j, _imopVarPre1463, cv.f]) #pragma omp barrier } n = 0; for (i = grid_points[0] - 3; i >= 0; i--) { i1 = i + 1; i2 = i + 2; #pragma omp for nowait for (m = 0; m < 3; m++) { for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i1][j][k] - lhs[n + 4][i][j][k] * rhs[m][i2][j][k]; } } } // #pragma omp dummyFlush BARRIER_START written([rhs.f]) read([rhs.f, comz1, comz5, dtty1, rhoq.f, j, dy1, c1c5, speed, pinvr, dy5, _imopVarPre1359, _imopVarPre1347, m, rhs, dymax, comz4, con43, grid_points, rhoq, i, c3c4, _imopVarPre1453, rho_i.f, speed.f, i, rho_i, i, lhs.f, grid_points.f, vs, dy3, _imopVarPre1349, _imopVarPre1357, cv, bt, comz6, i, ninvr, dtty2, lhs, c2dtty1, lhsy, y_solve, vs.f, _imopVarPre1455, j, _imopVarPre1463, cv.f]) #pragma omp barrier } for (m = 3; m < 5; m++) { n = (m - 3 + 1) * 5; for (i = grid_points[0] - 3; i >= 0; i--) { i1 = i + 1; i2 = i + 2; #pragma omp for nowait for (j = 1; j <= grid_points[1] - 2; j++) { for (k = 1; k <= grid_points[2] - 2; k++) { rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i1][j][k] - lhs[n + 4][i][j][k] * rhs[m][i2][j][k]; } } // #pragma omp dummyFlush BARRIER_START written([rhs.f]) read([rhs.f, i, comz1, rho_i, i, lhs.f, comz5, dtty1, grid_points.f, rhoq.f, vs, j, dy1, c1c5, speed, dy3, pinvr, _imopVarPre1349, dy5, _imopVarPre1359, _imopVarPre1347, _imopVarPre1357, cv, bt, rhs, dymax, comz4, comz6, i, ninvr, dtty2, con43, grid_points, rhoq, lhs, c2dtty1, lhsy, y_solve, vs.f, _imopVarPre1455, i, c3c4, j, _imopVarPre1453, 
rho_i.f, _imopVarPre1463, speed.f, cv.f]) #pragma omp barrier } } } ninvr(); } static void y_solve(void ) { #pragma omp parallel { int i; int j; int k; int n; int j1; int j2; int m; double fac1; double fac2; lhsy(); n = 0; for (j = 0; j <= grid_points[1] - 3; j++) { j1 = j + 1; j2 = j + 2; #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (k = 1; k <= grid_points[2] - 2; k++) { fac1 = 1. / lhs[n + 2][i][j][k]; lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k]; lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i][j][k] = fac1 * rhs[m][i][j][k]; } lhs[n + 2][i][j1][k] = lhs[n + 2][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 3][i][j][k]; lhs[n + 3][i][j1][k] = lhs[n + 3][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i][j1][k] = rhs[m][i][j1][k] - lhs[n + 1][i][j1][k] * rhs[m][i][j][k]; } lhs[n + 1][i][j2][k] = lhs[n + 1][i][j2][k] - lhs[n + 0][i][j2][k] * lhs[n + 3][i][j][k]; lhs[n + 2][i][j2][k] = lhs[n + 2][i][j2][k] - lhs[n + 0][i][j2][k] * lhs[n + 4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i][j2][k] = rhs[m][i][j2][k] - lhs[n + 0][i][j2][k] * rhs[m][i][j][k]; } } } // #pragma omp dummyFlush BARRIER_START written([rhs.f, lhs.f]) read([rhs.f, i, rhs, lhs.f, grid_points.f, grid_points, lhs]) #pragma omp barrier } j = grid_points[1] - 2; j1 = grid_points[1] - 1; #pragma omp for nowait for (i = 1; i <= grid_points[0] - 2; i++) { for (k = 1; k <= grid_points[2] - 2; k++) { fac1 = 1. / lhs[n + 2][i][j][k]; lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k]; lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i][j][k] = fac1 * rhs[m][i][j][k]; } lhs[n + 2][i][j1][k] = lhs[n + 2][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 3][i][j][k]; lhs[n + 3][i][j1][k] = lhs[n + 3][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 4][i][j][k]; for (m = 0; m < 3; m++) { rhs[m][i][j1][k] = rhs[m][i][j1][k] - lhs[n + 1][i][j1][k] * rhs[m][i][j][k]; } fac2 = 1. 
/ lhs[n + 2][i][j1][k];
for (m = 0; m < 3; m++) {
rhs[m][i][j1][k] = fac2 * rhs[m][i][j1][k];
}
}
}
#pragma omp barrier
/* Forward elimination for components 3 and 4 (own band sets). */
for (m = 3; m < 5; m++) {
n = (m - 3 + 1) * 5;
for (j = 0; j <= grid_points[1] - 3; j++) {
j1 = j + 1;
j2 = j + 2;
#pragma omp for nowait
for (i = 1; i <= grid_points[0] - 2; i++) {
for (k = 1; k <= grid_points[2] - 2; k++) {
fac1 = 1. / lhs[n + 2][i][j][k];
lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];
lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];
rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];
lhs[n + 2][i][j1][k] = lhs[n + 2][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 3][i][j][k];
lhs[n + 3][i][j1][k] = lhs[n + 3][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 4][i][j][k];
rhs[m][i][j1][k] = rhs[m][i][j1][k] - lhs[n + 1][i][j1][k] * rhs[m][i][j][k];
lhs[n + 1][i][j2][k] = lhs[n + 1][i][j2][k] - lhs[n + 0][i][j2][k] * lhs[n + 3][i][j][k];
lhs[n + 2][i][j2][k] = lhs[n + 2][i][j2][k] - lhs[n + 0][i][j2][k] * lhs[n + 4][i][j][k];
rhs[m][i][j2][k] = rhs[m][i][j2][k] - lhs[n + 0][i][j2][k] * rhs[m][i][j][k];
}
}
#pragma omp barrier
}
/* last two rows for this component */
j = grid_points[1] - 2;
j1 = grid_points[1] - 1;
#pragma omp for nowait
for (i = 1; i <= grid_points[0] - 2; i++) {
for (k = 1; k <= grid_points[2] - 2; k++) {
fac1 = 1. / lhs[n + 2][i][j][k];
lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];
lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];
rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];
lhs[n + 2][i][j1][k] = lhs[n + 2][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 3][i][j][k];
lhs[n + 3][i][j1][k] = lhs[n + 3][i][j1][k] - lhs[n + 1][i][j1][k] * lhs[n + 4][i][j][k];
rhs[m][i][j1][k] = rhs[m][i][j1][k] - lhs[n + 1][i][j1][k] * rhs[m][i][j][k];
fac2 = 1. / lhs[n + 2][i][j1][k];
rhs[m][i][j1][k] = fac2 * rhs[m][i][j1][k];
}
}
#pragma omp barrier
}
/* Back substitution, first step at the high-j boundary. */
j = grid_points[1] - 2;
j1 = grid_points[1] - 1;
n = 0;
for (m = 0; m < 3; m++) {
#pragma omp for nowait
for (i = 1; i <= grid_points[0] - 2; i++) {
for (k = 1; k <= grid_points[2] - 2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j1][k];
}
}
#pragma omp barrier
}
for (m = 3; m < 5; m++) {
#pragma omp for nowait
for (i = 1; i <= grid_points[0] - 2; i++) {
for (k = 1; k <= grid_points[2] - 2; k++) {
n = (m - 3 + 1) * 5;
rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j1][k];
}
}
#pragma omp barrier
}
/* Back substitution, main sweep: j descends; barrier per j plane because
 * each row uses the two rows above it. */
n = 0;
for (m = 0; m < 3; m++) {
for (j = grid_points[1] - 3; j >= 0; j--) {
j1 = j + 1;
j2 = j + 2;
#pragma omp for nowait
for (i = 1; i <= grid_points[0] - 2; i++) {
for (k = 1; k <= grid_points[2] - 2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j1][k] - lhs[n + 4][i][j][k] * rhs[m][i][j2][k];
}
}
#pragma omp barrier
}
}
for (m = 3; m < 5; m++) {
n = (m - 3 + 1) * 5;
for (j = grid_points[1] - 3; j >= 0; j--) {
j1 = j + 1;
j2 = j1 + 1;
#pragma omp for nowait
for (i = 1; i <= grid_points[0] - 2; i++) {
for (k = 1; k <= grid_points[2] - 2; k++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j1][k] - lhs[n + 4][i][j][k] * rhs[m][i][j2][k];
}
}
#pragma omp barrier
}
}
}
pinvr();  /* outside the parallel region */
}

/*
 * z_solve: pentadiagonal line solve along the z direction (cells k, k+1,
 * k+2).  Unlike x_solve/y_solve, the forward sweep over k runs entirely
 * inside one worksharing loop over i, since each (i,j) column is
 * independent.  Ends by calling tzetar() outside the parallel region.
 */
static void z_solve(void)
{
#pragma omp parallel
{
int i;
int j;
int k;
int n;
int k1;
int k2;
int m;
double fac1;
double fac2;
lhsz();
n = 0;
/* Forward elimination for components 0..2, whole k sweep per column. */
#pragma omp for nowait
for (i = 1; i <= grid_points[0] - 2; i++) {
for (j = 1; j <= grid_points[1] - 2; j++) {
for (k = 0; k <= grid_points[2] - 3; k++) {
k1 = k + 1;
k2 = k + 2;
fac1 = 1. / lhs[n + 2][i][j][k];
lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];
lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];
for (m = 0; m < 3; m++) {
rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];
}
lhs[n + 2][i][j][k1] = lhs[n + 2][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 3][i][j][k];
lhs[n + 3][i][j][k1] = lhs[n + 3][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 4][i][j][k];
for (m = 0; m < 3; m++) {
rhs[m][i][j][k1] = rhs[m][i][j][k1] - lhs[n + 1][i][j][k1] * rhs[m][i][j][k];
}
lhs[n + 1][i][j][k2] = lhs[n + 1][i][j][k2] - lhs[n + 0][i][j][k2] * lhs[n + 3][i][j][k];
lhs[n + 2][i][j][k2] = lhs[n + 2][i][j][k2] - lhs[n + 0][i][j][k2] * lhs[n + 4][i][j][k];
for (m = 0; m < 3; m++) {
rhs[m][i][j][k2] = rhs[m][i][j][k2] - lhs[n + 0][i][j][k2] * rhs[m][i][j][k];
}
}
}
}
#pragma omp barrier
/* Last two cells of each column (no k+2 neighbor). */
k = grid_points[2] - 2;
k1 = grid_points[2] - 1;
#pragma omp for nowait
for (i = 1; i <= grid_points[0] - 2; i++) {
for (j = 1; j <= grid_points[1] - 2; j++) {
fac1 = 1.
/ lhs[n + 2][i][j][k];
lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];
lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];
for (m = 0; m < 3; m++) {
rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];
}
lhs[n + 2][i][j][k1] = lhs[n + 2][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 3][i][j][k];
lhs[n + 3][i][j][k1] = lhs[n + 3][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 4][i][j][k];
for (m = 0; m < 3; m++) {
rhs[m][i][j][k1] = rhs[m][i][j][k1] - lhs[n + 1][i][j][k1] * rhs[m][i][j][k];
}
/* scale the very last cell so its diagonal becomes 1 */
fac2 = 1. / lhs[n + 2][i][j][k1];
for (m = 0; m < 3; m++) {
rhs[m][i][j][k1] = fac2 * rhs[m][i][j][k1];
}
}
}
#pragma omp barrier
/* Forward elimination for components 3 and 4, each with its own band set
 * n = (m-3+1)*5; same structure as the m = 0..2 sweep above. */
for (m = 3; m < 5; m++) {
n = (m - 3 + 1) * 5;
#pragma omp for nowait
for (i = 1; i <= grid_points[0] - 2; i++) {
for (j = 1; j <= grid_points[1] - 2; j++) {
for (k = 0; k <= grid_points[2] - 3; k++) {
k1 = k + 1;
k2 = k + 2;
fac1 = 1. / lhs[n + 2][i][j][k];
lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];
lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];
rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];
lhs[n + 2][i][j][k1] = lhs[n + 2][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 3][i][j][k];
lhs[n + 3][i][j][k1] = lhs[n + 3][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 4][i][j][k];
rhs[m][i][j][k1] = rhs[m][i][j][k1] - lhs[n + 1][i][j][k1] * rhs[m][i][j][k];
lhs[n + 1][i][j][k2] = lhs[n + 1][i][j][k2] - lhs[n + 0][i][j][k2] * lhs[n + 3][i][j][k];
lhs[n + 2][i][j][k2] = lhs[n + 2][i][j][k2] - lhs[n + 0][i][j][k2] * lhs[n + 4][i][j][k];
rhs[m][i][j][k2] = rhs[m][i][j][k2] - lhs[n + 0][i][j][k2] * rhs[m][i][j][k];
}
}
}
#pragma omp barrier
/* last two cells for this component */
k = grid_points[2] - 2;
k1 = grid_points[2] - 1;
#pragma omp for nowait
for (i = 1; i <= grid_points[0] - 2; i++) {
for (j = 1; j <= grid_points[1] - 2; j++) {
fac1 = 1.
/ lhs[n + 2][i][j][k];
lhs[n + 3][i][j][k] = fac1 * lhs[n + 3][i][j][k];
lhs[n + 4][i][j][k] = fac1 * lhs[n + 4][i][j][k];
rhs[m][i][j][k] = fac1 * rhs[m][i][j][k];
lhs[n + 2][i][j][k1] = lhs[n + 2][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 3][i][j][k];
lhs[n + 3][i][j][k1] = lhs[n + 3][i][j][k1] - lhs[n + 1][i][j][k1] * lhs[n + 4][i][j][k];
rhs[m][i][j][k1] = rhs[m][i][j][k1] - lhs[n + 1][i][j][k1] * rhs[m][i][j][k];
fac2 = 1. / lhs[n + 2][i][j][k1];
rhs[m][i][j][k1] = fac2 * rhs[m][i][j][k1];
}
}
#pragma omp barrier
}
/* Back substitution, first step at the high-k boundary. */
k = grid_points[2] - 2;
k1 = grid_points[2] - 1;
n = 0;
for (m = 0; m < 3; m++) {
#pragma omp for nowait
for (i = 1; i <= grid_points[0] - 2; i++) {
for (j = 1; j <= grid_points[1] - 2; j++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j][k1];
}
}
#pragma omp barrier
}
for (m = 3; m < 5; m++) {
n = (m - 3 + 1) * 5;
#pragma omp for nowait
for (i = 1; i <= grid_points[0] - 2; i++) {
for (j = 1; j <= grid_points[1] - 2; j++) {
rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j][k1];
}
}
#pragma omp barrier
}
/* Back substitution, main sweep: k descends within each independent
 * (i,j) column; each cell uses the two cells above it. */
n = 0;
for (m = 0; m < 3; m++) {
#pragma omp for nowait
for (i = 1; i <= grid_points[0] - 2; i++) {
for (j = 1; j <= grid_points[1] - 2; j++) {
for (k = grid_points[2] - 3; k >= 0; k--) {
k1 = k + 1;
k2 = k + 2;
rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j][k1] - lhs[n + 4][i][j][k] * rhs[m][i][j][k2];
}
}
}
#pragma omp barrier
}
for (m = 3; m < 5; m++) {
n = (m - 3 + 1) * 5;
#pragma omp for nowait
for (i = 1; i <= grid_points[0] - 2; i++) {
for (j = 1; j <= grid_points[1] - 2; j++) {
for (k = grid_points[2] - 3; k >= 0; k--) {
k1 = k + 1;
k2 = k + 2;
rhs[m][i][j][k] = rhs[m][i][j][k] - lhs[n + 3][i][j][k] * rhs[m][i][j][k1] - lhs[n + 4][i][j][k] * rhs[m][i][j][k2];
}
}
}
#pragma omp barrier
}
}
tzetar();  /* outside the parallel region */
}
3d7pt_var.c
/*
 * Order-1, 3D 7 point stencil with variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"

/* Number of timed repetitions of the whole sweep (best time is kept). */
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 * NOTE: normalizes by mutating *y (callers here discard y afterwards).
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }
  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Return 1 if result is negative.
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 8; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; 
t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
GB_unop__isfinite_bool_fp64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop_apply__isfinite_bool_fp64
// op(A') function: GB_unop_tran__isfinite_bool_fp64

// C type: bool
// A type: double
// cast: double cij = (aij)
// unaryop: cij = isfinite (aij)

// These macros parameterize the generic templates (e.g. GB_unop_transpose.c)
// for this specific operator/type combination.

#define GB_ATYPE \
 double

#define GB_CTYPE \
 bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
 double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
 z = isfinite (x) ;

// casting
#define GB_CAST(z, aij) \
 double z = (aij) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
 /* aij = Ax [pA] */ \
 double aij = Ax [pA] ; \
 /* Cx [pC] = op (cast (aij)) */ \
 double z = (aij) ; \
 Cx [pC] = isfinite (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
 0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
 (GxB_NO_ISFINITE || GxB_NO_BOOL || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cx [p] = isfinite ((double) Ax [p]) for all p in 0..anz-1.
// Ab (if non-NULL) is the bitmap of A: entries with Ab [p] == 0 are skipped.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).

GrB_Info GB_unop_apply__isfinite_bool_fp64
(
 bool *Cx, // Cx and Ax may be aliased
 const double *Ax,
 const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
 int64_t anz,
 int nthreads
)
{
 #if GB_DISABLE
 return (GrB_NO_VALUE) ;
 #else
 int64_t p ;
 if (Ab == NULL)
 {
 // dense/sparse case: every position 0..anz-1 holds an entry
 #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
 // identity op with no typecast: a flat memcpy suffices
 GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
 #else
 #pragma omp parallel for num_threads(nthreads) schedule(static)
 for (p = 0 ; p < anz ; p++)
 {
 double aij = Ax [p] ;
 double z = (aij) ;
 Cx [p] = isfinite (z) ;
 }
 #endif
 }
 else
 {
 // bitmap case, no transpose; A->b already memcpy'd into C->b
 #pragma omp parallel for num_threads(nthreads) schedule(static)
 for (p = 0 ; p < anz ; p++)
 {
 if (!Ab [p]) continue ;
 double aij = Ax [p] ;
 double z = (aij) ;
 Cx [p] = isfinite (z) ;
 }
 }
 return (GrB_SUCCESS) ;
 #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose logic lives in the GB_unop_transpose.c template,
// which is textually included and specialized by the macros defined above.

GrB_Info GB_unop_tran__isfinite_bool_fp64
(
 GrB_Matrix C,
 const GrB_Matrix A,
 int64_t *GB_RESTRICT *Workspaces,
 const int64_t *GB_RESTRICT A_slice,
 int nworkspaces,
 int nthreads
)
{
 #if GB_DISABLE
 return (GrB_NO_VALUE) ;
 #else
 #include "GB_unop_transpose.c"
 return (GrB_SUCCESS) ;
 #endif
}

#endif
3d25pt.c
/* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 32; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*( coef0* A[t%2][i ][j ][k ] + coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] + A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] + A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) + coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] + A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] + A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) + coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] + A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] + A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) + coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] + A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] + A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) ); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
GB_unop__cos_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop_apply__cos_fp32_fp32
// op(A') function: GB_unop_tran__cos_fp32_fp32

// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = cosf (aij)

// These macros parameterize the generic templates (e.g. GB_unop_transpose.c)
// for this specific operator/type combination.

#define GB_ATYPE \
 float

#define GB_CTYPE \
 float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
 float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
 z = cosf (x) ;

// casting
#define GB_CAST(z, aij) \
 float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
 /* aij = Ax [pA] */ \
 float aij = Ax [pA] ; \
 /* Cx [pC] = op (cast (aij)) */ \
 float z = aij ; \
 Cx [pC] = cosf (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
 0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
 (GxB_NO_COS || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cx [p] = cosf (Ax [p]) for all p in 0..anz-1.
// Ab (if non-NULL) is the bitmap of A: entries with Ab [p] == 0 are skipped.
// Returns GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).

GrB_Info GB_unop_apply__cos_fp32_fp32
(
 float *Cx, // Cx and Ax may be aliased
 const float *Ax,
 const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
 int64_t anz,
 int nthreads
)
{
 #if GB_DISABLE
 return (GrB_NO_VALUE) ;
 #else
 int64_t p ;
 if (Ab == NULL)
 {
 // dense/sparse case: every position 0..anz-1 holds an entry
 #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
 // identity op with no typecast: a flat memcpy suffices
 GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
 #else
 #pragma omp parallel for num_threads(nthreads) schedule(static)
 for (p = 0 ; p < anz ; p++)
 {
 float aij = Ax [p] ;
 float z = aij ;
 Cx [p] = cosf (z) ;
 }
 #endif
 }
 else
 {
 // bitmap case, no transpose; A->b already memcpy'd into C->b
 #pragma omp parallel for num_threads(nthreads) schedule(static)
 for (p = 0 ; p < anz ; p++)
 {
 if (!Ab [p]) continue ;
 float aij = Ax [p] ;
 float z = aij ;
 Cx [p] = cosf (z) ;
 }
 }
 return (GrB_SUCCESS) ;
 #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose logic lives in the GB_unop_transpose.c template,
// which is textually included and specialized by the macros defined above.

GrB_Info GB_unop_tran__cos_fp32_fp32
(
 GrB_Matrix C,
 const GrB_Matrix A,
 int64_t *GB_RESTRICT *Workspaces,
 const int64_t *GB_RESTRICT A_slice,
 int nworkspaces,
 int nthreads
)
{
 #if GB_DISABLE
 return (GrB_NO_VALUE) ;
 #else
 #include "GB_unop_transpose.c"
 return (GrB_SUCCESS) ;
 #endif
}

#endif
ft_ao.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Fourier transformed AO pair * \int e^{-i Gv \cdot r} i(r) * j(r) dr^3 * * eval_gz, b, gxyz, gs: * - when eval_gz is GTO_Gv_uniform_orth * > b (reciprocal vectors) is diagonal 3x3 matrix * > Gv k-space grids = dot(b.T,gxyz) * > gxyz[3,nGv] = (kx[:nGv], ky[:nGv], kz[:nGv]) * > gs[3]: The number of G-vectors along each direction (nGv=gs[0]*gs[1]*gs[2]). * - when eval_gz is GTO_Gv_uniform_nonorth * > b is 3x3 matrix = 2\pi * scipy.linalg.inv(cell.lattice_vectors).T * > Gv k-space grids = dot(b.T,gxyz) * > gxyz[3,nGv] = (kx[:nGv], ky[:nGv], kz[:nGv]) * > gs[3]: The number of *positive* G-vectors along each direction. * - when eval_gz is GTO_Gv_general * only Gv is needed * - when eval_gz is GTO_Gv_nonuniform_orth * > b is the basic G value for each cartesian component * Gx = b[:gs[0]] * Gy = b[gs[0]:gs[0]+gs[1]] * Gz = b[gs[0]+gs[1]:] * > gs[3]: Number of basic G values along each direction. 
* > gxyz[3,nGv] are used to index the basic G value * > Gv is not used */ #include <stdlib.h> #include <string.h> #include <math.h> #include <assert.h> #include <complex.h> #include "config.h" #include "cint.h" #include "gto/ft_ao.h" #define SQRTPI 1.7724538509055160272981674833411451 #define EXPCUTOFF 100 #define NCTRMAX 72 double CINTsquare_dist(const double *r1, const double *r2); double CINTcommon_fac_sp(int l); /* * Pyscf-1.5 (and older) use libcint function CINTinit_int1e_EnvVars and * CINTg1e_index_xyz. It's unsafe since the CINTEnvVars type was redefined * in ft_ao.h. Copy the contents of CINTinit_int1e_EnvVars and * CINTg1e_index_xyz here. */ #define IINC 0 #define JINC 1 #define GSHIFT 4 #define POS_E1 5 #define RYS_ROOTS 6 #define TENSOR 7 void GTO_ft_init1e_envs(CINTEnvVars *envs, int *ng, int *shls, int *atm, int natm, int *bas, int nbas, double *env) { envs->natm = natm; envs->nbas = nbas; envs->atm = atm; envs->bas = bas; envs->env = env; envs->shls = shls; const int i_sh = shls[0]; const int j_sh = shls[1]; envs->i_l = bas(ANG_OF, i_sh); envs->j_l = bas(ANG_OF, j_sh); envs->x_ctr[0] = bas(NCTR_OF, i_sh); envs->x_ctr[1] = bas(NCTR_OF, j_sh); envs->nfi = (envs->i_l+1)*(envs->i_l+2)/2; envs->nfj = (envs->j_l+1)*(envs->j_l+2)/2; envs->nf = envs->nfi * envs->nfj; envs->common_factor = 1; envs->gbits = ng[GSHIFT]; envs->ncomp_e1 = ng[POS_E1]; envs->ncomp_tensor = ng[TENSOR]; envs->li_ceil = envs->i_l + ng[IINC]; envs->lj_ceil = envs->j_l + ng[JINC]; if (ng[RYS_ROOTS] > 0) { envs->nrys_roots = ng[RYS_ROOTS]; } else { envs->nrys_roots = (envs->li_ceil + envs->lj_ceil)/2 + 1; } envs->ri = env + atm(PTR_COORD, bas(ATOM_OF, i_sh)); envs->rj = env + atm(PTR_COORD, bas(ATOM_OF, j_sh)); int dli, dlj; if (envs->li_ceil < envs->lj_ceil) { dli = envs->li_ceil + 1; dlj = envs->li_ceil + envs->lj_ceil + 1; } else { dli = envs->li_ceil + envs->lj_ceil + 1; dlj = envs->lj_ceil + 1; } envs->g_stride_i = 1; envs->g_stride_j = dli; envs->g_size = dli * dlj; envs->lk_ceil = 
1; envs->ll_ceil = 1; envs->g_stride_k = 0; envs->g_stride_l = 0; } #define CART_MAX 128 // > (ANG_MAX*(ANG_MAX+1)/2) void CINTcart_comp(int *nx, int *ny, int *nz, const int lmax); static void _g2c_index_xyz(int *idx, const CINTEnvVars *envs) { int i_l = envs->i_l; int j_l = envs->j_l; int nfi = envs->nfi; int nfj = envs->nfj; int di = envs->g_stride_i; int dj = envs->g_stride_j; int i, j, n; int ofx, ofjx; int ofy, ofjy; int ofz, ofjz; int i_nx[CART_MAX], i_ny[CART_MAX], i_nz[CART_MAX]; int j_nx[CART_MAX], j_ny[CART_MAX], j_nz[CART_MAX]; CINTcart_comp(i_nx, i_ny, i_nz, i_l); CINTcart_comp(j_nx, j_ny, j_nz, j_l); ofx = 0; ofy = envs->g_size; ofz = envs->g_size * 2; n = 0; for (j = 0; j < nfj; j++) { ofjx = ofx + dj * j_nx[j]; ofjy = ofy + dj * j_ny[j]; ofjz = ofz + dj * j_nz[j]; for (i = 0; i < nfi; i++) { idx[n+0] = ofjx + di * i_nx[i]; idx[n+1] = ofjy + di * i_ny[i]; idx[n+2] = ofjz + di * i_nz[i]; n += 3; } } } static const int _LEN_CART[] = { 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 66, 78, 91, 105, 120, 136 }; static const int _CUM_LEN_CART[] = { 1, 4, 10, 20, 35, 56, 84, 120, 165, 220, 286, 364, 455, 560, 680, 816, }; /* * WHEREX_IF_L_INC1 = [xyz2addr(x,y,z) for x,y,z in loopcart(L_MAX) if x > 0] * WHEREY_IF_L_INC1 = [xyz2addr(x,y,z) for x,y,z in loopcart(L_MAX) if y > 0] * WHEREZ_IF_L_INC1 = [xyz2addr(x,y,z) for x,y,z in loopcart(L_MAX) if z > 0] */ static const int _UPIDY[] = { 1, 3, 4, 6, 7, 8, 10, 11, 12, 13, 15, 16, 17, 18, 19, 21, 22, 23, 24, 25, 26, 28, 29, 30, 31, 32, 33, 34, 36, 37, 38, 39, 40, 41, 42, 43, 45, 46, 47, 48, 49, 50, 51, 52, 53, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 91, 92, 93, 94, 95, 96, 97, 98, 99,100,101,102,103, 105,106,107,108,109,110,111,112,113,114,115,116,117,118, 120,121,122,123,124,125,126,127,128,129,130,131,132,133,134, }; static const int _UPIDZ[] = { 2, 4, 5, 7, 8, 9, 11, 12, 13, 14, 16, 17, 18, 19, 20, 22, 23, 24, 25, 26, 27, 29, 30, 
31, 32, 33, 34, 35, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 92, 93, 94, 95, 96, 97, 98, 99,100,101,102,103,104, 106,107,108,109,110,111,112,113,114,115,116,117,118,119, 121,122,123,124,125,126,127,128,129,130,131,132,133,134,135, }; /* * _DOWN_XYZ, _DOWN_XYZ_ORDER, _DOWN1, _DOWN2 labels the index in the 1D * recursive relation f_{i+1} = i/2a * f_{i-1} + X * f_{i} * _DOWN_XYZ_ORDER i in i/2a * _DOWN2 index of f_{i-1} * _DOWN_XYZ index of X * _DOWN1 index of f_{i} */ static const int _DOWN1[] = { -1, 0, 0, 0, 0, 1, 2, 1, 2, 2, 0, 0, 0, 3, 4, 5, 3, 3, 5, 5, 0, 0, 0, 3, 2, 5, 6, 7, 8, 9, 6, 6, 8, 9, 9, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 11, 12, 13, 14, 10, 10, 12, 13, 14, 14, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 16, 17, 18, 19, 20, 15, 15, 17, 18, 19, 20, 20, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 22, 23, 24, 25, 26, 27, 21, 21, 23, 24, 25, 26, 27, 27, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 29, 30, 31, 32, 33, 34, 35, 28, 28, 30, 31, 32, 33, 34, 35, 35, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 36, 36, 38, 39, 40, 41, 42, 43, 44, 44, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 45, 45, 47, 48, 49, 50, 51, 52, 53, 54, 54, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 55, 55, 57, 58, 59, 60, 61, 62, 63, 64, 65, 65, 
0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 45, 57, 58, 59, 60, 61, 62, 63, 54, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 66, 66, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 77, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 45, 57, 58, 59, 60, 61, 62, 63, 54, 65, 66, 55, 68, 69, 70, 71, 72, 73, 74, 75, 65, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 78, 78, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 90, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 45, 57, 58, 59, 60, 61, 62, 63, 54, 65, 66, 55, 68, 69, 70, 71, 72, 73, 74, 75, 65, 77, 78, 66, 80, 81, 82, 83, 84, 85, 86, 87, 88, 77, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 91, 91, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 104, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 45, 57, 58, 59, 60, 61, 62, 63, 54, 65, 66, 55, 68, 69, 70, 71, 72, 73, 74, 75, 65, 77, 78, 66, 80, 81, 82, 83, 84, 85, 86, 87, 88, 77, 90, 91, 78, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 90, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 105, 105, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 119, }; static const int _DOWN2[] = { -1, -1, -1, -1, 0, -1, -1, 0, -1, 0, 0, -1, -1, -1, -1, -1, 1, -1, -1, 2, 0, -1, -1, 3, -1, 5, -1, -1, -1, -1, 3, -1, 5, -1, 5, 0, -1, -1, 3, -1, 5, 6, -1, -1, 
9, -1, -1, -1, -1, -1, 6, -1, 8, 9, -1, 9, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, -1, -1, -1, -1, -1, -1, 10, -1, 12, 13, 14, -1, 14, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, -1, -1, -1, -1, -1, -1, -1, 15, -1, 17, 18, 19, 20, -1, 20, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, -1, -1, -1, -1, -1, -1, -1, -1, 21, -1, 23, 24, 25, 26, 27, -1, 27, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, -1, -1, -1, -1, -1, -1, -1, -1, -1, 28, -1, 30, 31, 32, 33, 34, 35, -1, 35, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 36, -1, 38, 39, 40, 41, 42, 43, 44, -1, 44, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 45, -1, 47, 48, 49, 50, 51, 52, 53, 54, -1, 54, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, 55, -1, 57, 58, 59, 60, 61, 62, 63, -1, 65, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 55, -1, 57, 58, 59, 60, 61, 62, 63, 64, 65, -1, 65, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, 55, -1, 57, 58, 59, 60, 61, 62, 63, -1, 65, 66, -1, 68, 69, 70, 71, 72, 73, 74, 75, -1, 77, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 66, -1, 68, 69, 70, 71, 72, 73, 74, 
75, 76, 77, -1, 77, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, 55, -1, 57, 58, 59, 60, 61, 62, 63, -1, 65, 66, -1, 68, 69, 70, 71, 72, 73, 74, 75, -1, 77, 78, -1, 80, 81, 82, 83, 84, 85, 86, 87, 88, -1, 90, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 78, -1, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, -1, 90, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, 55, -1, 57, 58, 59, 60, 61, 62, 63, -1, 65, 66, -1, 68, 69, 70, 71, 72, 73, 74, 75, -1, 77, 78, -1, 80, 81, 82, 83, 84, 85, 86, 87, 88, -1, 90, 91, -1, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, -1, 104, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 91, -1, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, -1, 104, }; static const int _DOWN_XYZ[] = { 2, 0, 1, 2, 0, 0, 0, 1, 1, 2, 0, 1, 2, 0, 0, 0, 1, 2, 1, 2, 0, 1, 2, 0, 1, 0, 0, 0, 0, 0, 1, 2, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 
0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, }; static const int _DOWN_XYZ_ORDER[] = { 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 2, 0, 0, 0, 0, 0, 2, 0, 0, 2, 3, 0, 0, 1, 0, 1, 0, 0, 0, 0, 3, 0, 1, 0, 3, 4, 0, 0, 2, 0, 2, 1, 0, 0, 1, 0, 0, 0, 0, 0, 4, 0, 2, 1, 0, 4, 5, 0, 0, 3, 0, 3, 2, 0, 0, 2, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 5, 0, 3, 2, 1, 0, 5, 6, 0, 0, 4, 0, 4, 3, 0, 0, 3, 2, 0, 2, 0, 2, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 6, 0, 4, 3, 2, 1, 0, 6, 7, 0, 0, 5, 0, 5, 4, 0, 0, 4, 3, 0, 3, 0, 3, 2, 0, 2, 2, 0, 2, 1, 
0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 5, 4, 3, 2, 1, 0, 7, 8, 0, 0, 6, 0, 6, 5, 0, 0, 5, 4, 0, 4, 0, 4, 3, 0, 3, 3, 0, 3, 2, 0, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 6, 5, 4, 3, 2, 1, 0, 8, 9, 0, 0, 7, 0, 7, 6, 0, 0, 6, 5, 0, 5, 0, 5, 4, 0, 4, 4, 0, 4, 3, 0, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 0, 7, 6, 5, 4, 3, 2, 1, 0, 9, 10, 0, 0, 8, 0, 8, 7, 0, 0, 7, 6, 0, 6, 0, 6, 5, 0, 5, 5, 0, 5, 4, 0, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 8, 7, 6, 5, 4, 3, 2, 1, 0, 10, 11, 0, 0, 9, 0, 9, 8, 0, 0, 8, 7, 0, 7, 0, 7, 6, 0, 6, 6, 0, 6, 5, 0, 5, 5, 5, 0, 5, 4, 0, 4, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 11, 12, 0, 0, 10, 0, 10, 9, 0, 0, 9, 8, 0, 8, 0, 8, 7, 0, 7, 7, 0, 7, 6, 0, 6, 6, 6, 0, 6, 5, 0, 5, 5, 5, 5, 0, 5, 4, 0, 4, 4, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 12, 13, 0, 0, 11, 0, 11, 10, 0, 0, 10, 9, 0, 9, 0, 9, 8, 0, 8, 8, 0, 8, 7, 0, 7, 7, 7, 0, 7, 6, 0, 6, 6, 6, 6, 0, 6, 5, 0, 5, 5, 5, 5, 5, 0, 5, 4, 0, 4, 4, 4, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 0, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 13, 14, 0, 0, 12, 0, 12, 11, 0, 0, 11, 10, 0, 10, 0, 10, 9, 0, 9, 9, 0, 9, 8, 0, 8, 8, 8, 0, 8, 7, 0, 7, 7, 7, 7, 0, 7, 6, 0, 6, 6, 6, 6, 6, 0, 6, 5, 0, 5, 5, 5, 5, 5, 5, 0, 5, 4, 0, 4, 4, 4, 4, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 14, 0, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 14, }; #define WHEREX_IF_L_INC1(i) i #define WHEREY_IF_L_INC1(i) _UPIDY[i] #define WHEREZ_IF_L_INC1(i) _UPIDZ[i] #define STARTX_IF_L_DEC1(i) 0 #define STARTY_IF_L_DEC1(i) ((i<2)?0:_LEN_CART[i-2]) #define STARTZ_IF_L_DEC1(i) (_LEN_CART[i-1]-1) #define ADDR_IF_L_DEC1(l,m) _DOWN1[_CUM_LEN_CART[l-1]+m] #define ADDR_IF_L_DEC2(l,m) _DOWN2[_CUM_LEN_CART[l-1]+m] #define DEC1_XYZ(l,m) _DOWN_XYZ[_CUM_LEN_CART[l-1]+m] #define DEC1_XYZ_ORDER(l,m) _DOWN_XYZ_ORDER[_CUM_LEN_CART[l-1]+m] static int vrr1d_withGv(double complex *g, double *rijri, double aij, double *Gv, int topl, size_t NGv) { int cumxyz = 1; if (topl == 0) { return cumxyz; } double *kx = Gv; double *ky = kx + NGv; double *kz = ky + NGv; int i, n, m, l; double a2; double complex *p0, *p1, *p2, *dec1, *dec2; double *ka2 = malloc(sizeof(double) * NGv*3); double *kxa2 = ka2; double *kya2 = kxa2 + NGv; double *kza2 = kya2 + NGv; a2 = .5 / aij; for (n = 0; n < NGv; n++) { kxa2[n] = kx[n] * a2; kya2[n] = ky[n] * a2; kza2[n] = kz[n] * a2; } p0 = g + NGv; for (n = 0; n < NGv; n++) { p0[ n] = (rijri[0] - kxa2[n]*_Complex_I) * g[n]; p0[NGv +n] = (rijri[1] - kya2[n]*_Complex_I) * g[n]; p0[NGv*2+n] = (rijri[2] - kza2[n]*_Complex_I) * g[n]; } cumxyz += 3; for (l = 1; l < topl; l++) { p0 = g + cumxyz * NGv; dec1 = p0 - _LEN_CART[l ] * NGv; dec2 = dec1 - _LEN_CART[l-1] * NGv; for (i = 0; i < _LEN_CART[l+1]; i++) { m = DEC1_XYZ(l+1,i); kxa2 = ka2 + m * NGv; p1 = dec1 + ADDR_IF_L_DEC1(l+1,i) * NGv; p2 = dec2 + ADDR_IF_L_DEC2(l+1,i) * NGv; if (ADDR_IF_L_DEC2(l+1,i) < 0) { for (n = 0; n < NGv; n++) { p0[n] = (rijri[m]-kxa2[n]*_Complex_I)*p1[n]; } } else { a2 = .5/aij * DEC1_XYZ_ORDER(l+1,i); for (n = 0; n < NGv; n++) { p0[n] = a2*p2[n] + (rijri[m]-kxa2[n]*_Complex_I)*p1[n]; } } p0 += NGv; } cumxyz += _LEN_CART[l+1]; } free(ka2); return cumxyz; } /* * if li = 3, lj = 1 * (10 + X*00 -> 01): * gs + X*fs -> fp */ static void vrr2d_ket_inc1_withGv(double complex *out, const double complex 
*g, double *rirj, int li, int lj, size_t NGv) { if (lj == 0) { memcpy(out, g, sizeof(double complex)*_LEN_CART[li]*NGv); return; } const int row_10 = _LEN_CART[li+1]; const int row_00 = _LEN_CART[li ]; const int col_00 = _LEN_CART[lj-1]; const double complex *g00 = g; const double complex *g10 = g + row_00*col_00*NGv; int i, j, n; const double complex *p00, *p10; double complex *p01 = out; for (j = STARTX_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i) * NGv; p10 = g10 + (j*row_10+WHEREX_IF_L_INC1(i)) * NGv; for (n = 0; n < NGv; n++) { p01[n] = p10[n] + rirj[0] * p00[n]; } p01 += NGv; } } for (j = STARTY_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i) * NGv; p10 = g10 + (j*row_10+WHEREY_IF_L_INC1(i)) * NGv; for (n = 0; n < NGv; n++) { p01[n] = p10[n] + rirj[1] * p00[n]; } p01 += NGv; } } j = STARTZ_IF_L_DEC1(lj); if (j < _LEN_CART[lj-1]) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i) * NGv; p10 = g10 + (j*row_10+WHEREZ_IF_L_INC1(i)) * NGv; for (n = 0; n < NGv; n++) { p01[n] = p10[n] + rirj[2] * p00[n]; } p01 += NGv; } } } /* * transpose i, j when storing into out */ static void vrr2d_inc1_swapij(double complex *out, const double complex *g, double *rirj, int li, int lj, size_t NGv) { if (lj == 0) { memcpy(out, g, sizeof(double complex)*_LEN_CART[li]*NGv); return; } const int row_01 = _LEN_CART[lj]; const int row_10 = _LEN_CART[li+1]; const int row_00 = _LEN_CART[li ]; const int col_00 = _LEN_CART[lj-1]; const double complex *g00 = g; const double complex *g10 = g + row_00*col_00*NGv; int i, j, n; const double complex *p00, *p10; double complex *p01 = out; for (j = STARTX_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i) * NGv; p10 = g10 + (j*row_10+WHEREX_IF_L_INC1(i)) * NGv; p01 = out + i*row_01 * NGv; for (n = 0; n < NGv; n++) { p01[n] = p10[n] + rirj[0] * p00[n]; } } out += NGv; } for (j = 
STARTY_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i) * NGv; p10 = g10 + (j*row_10+WHEREY_IF_L_INC1(i)) * NGv; p01 = out + i*row_01 * NGv; for (n = 0; n < NGv; n++) { p01[n] = p10[n] + rirj[1] * p00[n]; } } out += NGv; } j = STARTZ_IF_L_DEC1(lj); if (j < _LEN_CART[lj-1]) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i) * NGv; p10 = g10 + (j*row_10+WHEREZ_IF_L_INC1(i)) * NGv; p01 = out + i*row_01 * NGv; for (n = 0; n < NGv; n++) { p01[n] = p10[n] + rirj[2] * p00[n]; } } } } static void vrr2d_withGv(double complex *out, double complex *g, double complex *gbuf2, const int li, const int lj, const double *ri, const double *rj, size_t NGv) { const int nmax = li + lj; double complex *g00, *g01, *gswap, *pg00, *pg01; int row_01, col_01, row_00, col_00; int i, j; double rirj[3]; rirj[0] = ri[0] - rj[0]; rirj[1] = ri[1] - rj[1]; rirj[2] = ri[2] - rj[2]; g00 = gbuf2; g01 = g; for (j = 1; j < lj; j++) { gswap = g00; g00 = g01; g01 = gswap; pg00 = g00; pg01 = g01; for (i = li; i <= nmax-j; i++) { vrr2d_ket_inc1_withGv(pg01, pg00, rirj, i, j, NGv); row_01 = _LEN_CART[i]; col_01 = _LEN_CART[j]; row_00 = _LEN_CART[i ]; col_00 = _LEN_CART[j-1]; pg00 += row_00*col_00 * NGv; pg01 += row_01*col_01 * NGv; } } vrr2d_ket_inc1_withGv(out, g01, rirj, li, lj, NGv); } /* (0,li+lj) => (li,lj) */ static void hrr2d_withGv(double complex *out, double complex *g, double complex *gbuf2, const int li, const int lj, const double *ri, const double *rj, size_t NGv) { const int nmax = li + lj; double complex *g00, *g01, *gswap, *pg00, *pg01; int row_01, col_01, row_00, col_00; int i, j; double rjri[3]; rjri[0] = rj[0] - ri[0]; rjri[1] = rj[1] - ri[1]; rjri[2] = rj[2] - ri[2]; g00 = gbuf2; g01 = g; for (i = 1; i < li; i++) { gswap = g00; g00 = g01; g01 = gswap; pg00 = g00; pg01 = g01; for (j = lj; j <= nmax-i; j++) { vrr2d_ket_inc1_withGv(pg01, pg00, rjri, j, i, NGv); row_01 = _LEN_CART[j]; col_01 = _LEN_CART[i]; row_00 = _LEN_CART[j ]; col_00 
= _LEN_CART[i-1]; pg00 += row_00*col_00 * NGv; pg01 += row_01*col_01 * NGv; } } vrr2d_inc1_swapij(out, g01, rjri, lj, li, NGv); } /* * Recursive relation */ static void aopair_rr_igtj_early(double complex *g, double ai, double aj, CINTEnvVars *envs, FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv) { const int topl = envs->li_ceil + envs->lj_ceil; const double aij = ai + aj; const double *ri = envs->ri; const double *rj = envs->rj; double rij[3], rijri[3]; rij[0] = (ai * ri[0] + aj * rj[0]) / aij; rij[1] = (ai * ri[1] + aj * rj[1]) / aij; rij[2] = (ai * ri[2] + aj * rj[2]) / aij; rijri[0] = rij[0] - ri[0]; rijri[1] = rij[1] - ri[1]; rijri[2] = rij[2] - ri[2]; (*eval_gz)(g, aij, rij, fac, Gv, b, gxyz, gs, NGv); vrr1d_withGv(g, rijri, aij, Gv, topl, NGv); } static void aopair_rr_iltj_early(double complex *g, double ai, double aj, CINTEnvVars *envs, FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv) { const int topl = envs->li_ceil + envs->lj_ceil; const double aij = ai + aj; const double *ri = envs->ri; const double *rj = envs->rj; double rij[3], rijrj[3]; rij[0] = (ai * ri[0] + aj * rj[0]) / aij; rij[1] = (ai * ri[1] + aj * rj[1]) / aij; rij[2] = (ai * ri[2] + aj * rj[2]) / aij; rijrj[0] = rij[0] - rj[0]; rijrj[1] = rij[1] - rj[1]; rijrj[2] = rij[2] - rj[2]; (*eval_gz)(g, aij, rij, fac, Gv, b, gxyz, gs, NGv); vrr1d_withGv(g, rijrj, aij, Gv, topl, NGv); } static void aopair_rr_igtj_lazy(double complex *g, double ai, double aj, CINTEnvVars *envs, FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv) { const int nmax = envs->li_ceil + envs->lj_ceil; const int lj = envs->lj_ceil; const int dj = envs->g_stride_j; const double aij = ai + aj; const double a2 = .5 / aij; const double *ri = envs->ri; const double *rj = envs->rj; double rij[3], rirj[3], rijri[3]; double complex *gx = g; double complex *gy = gx + envs->g_size * NGv; double 
complex *gz = gy + envs->g_size * NGv; double *kx = Gv; double *ky = kx + NGv; double *kz = ky + NGv; size_t off0, off1, off2; int i, j, n, ptr; double ia2; rirj[0] = ri[0] - rj[0]; rirj[1] = ri[1] - rj[1]; rirj[2] = ri[2] - rj[2]; rij[0] = (ai * ri[0] + aj * rj[0]) / aij; rij[1] = (ai * ri[1] + aj * rj[1]) / aij; rij[2] = (ai * ri[2] + aj * rj[2]) / aij; rijri[0] = rij[0] - ri[0]; rijri[1] = rij[1] - ri[1]; rijri[2] = rij[2] - ri[2]; for (n = 0; n < NGv; n++) { gx[n] = 1; gy[n] = 1; } (*eval_gz)(gz, aij, rij, fac, Gv, b, gxyz, gs, NGv); if (nmax > 0) { for (n = 0; n < NGv; n++) { if (gz[n] != 0) { gx[NGv+n] = (rijri[0] - kx[n]*a2*_Complex_I) * gx[n]; gy[NGv+n] = (rijri[1] - ky[n]*a2*_Complex_I) * gy[n]; gz[NGv+n] = (rijri[2] - kz[n]*a2*_Complex_I) * gz[n]; } } } for (i = 1; i < nmax; i++) { off0 = (i-1) * NGv; off1 = i * NGv; off2 = (i+1) * NGv; ia2 = i * a2; for (n = 0; n < NGv; n++) { if (gz[n] != 0) { gx[off2+n] = ia2 * gx[off0+n] + (rijri[0] - kx[n]*a2*_Complex_I) * gx[off1+n]; gy[off2+n] = ia2 * gy[off0+n] + (rijri[1] - ky[n]*a2*_Complex_I) * gy[off1+n]; gz[off2+n] = ia2 * gz[off0+n] + (rijri[2] - kz[n]*a2*_Complex_I) * gz[off1+n]; } } } for (j = 1; j <= lj; j++) { ptr = dj * j; for (i = ptr; i <= ptr + nmax - j; i++) { off0 = i * NGv - dj * NGv; // [i, j-1] off1 = (i+1) * NGv - dj * NGv; // [i+1,j-1] off2 = i * NGv; // [i, j ] for (n = 0; n < NGv; n++) { if (gz[n] != 0) { gx[off2+n] = gx[off1+n] + rirj[0] * gx[off0+n]; gy[off2+n] = gy[off1+n] + rirj[1] * gy[off0+n]; gz[off2+n] = gz[off1+n] + rirj[2] * gz[off0+n]; } } } } } static void aopair_rr_iltj_lazy(double complex *g, double ai, double aj, CINTEnvVars *envs, FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv) { const int nmax = envs->li_ceil + envs->lj_ceil; const int li = envs->li_ceil; const int dj = envs->g_stride_j; const double aij = ai + aj; const double a2 = .5 / aij; const double *ri = envs->ri; const double *rj = envs->rj; double rij[3], rirj[3], 
rijrj[3]; double complex *gx = g; double complex *gy = gx + envs->g_size * NGv; double complex *gz = gy + envs->g_size * NGv; double *kx = Gv; double *ky = kx + NGv; double *kz = ky + NGv; size_t off0, off1, off2; int i, j, n; double ia2; rirj[0] = rj[0] - ri[0]; rirj[1] = rj[1] - ri[1]; rirj[2] = rj[2] - ri[2]; rij[0] = (ai * ri[0] + aj * rj[0]) / aij; rij[1] = (ai * ri[1] + aj * rj[1]) / aij; rij[2] = (ai * ri[2] + aj * rj[2]) / aij; rijrj[0] = rij[0] - rj[0]; rijrj[1] = rij[1] - rj[1]; rijrj[2] = rij[2] - rj[2]; for (n = 0; n < NGv; n++) { gx[n] = 1; gy[n] = 1; } (*eval_gz)(gz, aij, rij, fac, Gv, b, gxyz, gs, NGv); if (nmax > 0) { off0 = dj * NGv; for (n = 0; n < NGv; n++) { if (gz[n] != 0) { gx[off0+n] = (rijrj[0] - kx[n]*a2*_Complex_I) * gx[n]; gy[off0+n] = (rijrj[1] - ky[n]*a2*_Complex_I) * gy[n]; gz[off0+n] = (rijrj[2] - kz[n]*a2*_Complex_I) * gz[n]; } } } for (i = 1; i < nmax; i++) { off0 = (i-1) * dj * NGv; off1 = i * dj * NGv; off2 = (i+1) * dj * NGv; ia2 = i * a2; for (n = 0; n < NGv; n++) { if (gz[n] != 0) { gx[off2+n] = ia2 * gx[off0+n] + (rijrj[0] - kx[n]*a2*_Complex_I) * gx[off1+n]; gy[off2+n] = ia2 * gy[off0+n] + (rijrj[1] - ky[n]*a2*_Complex_I) * gy[off1+n]; gz[off2+n] = ia2 * gz[off0+n] + (rijrj[2] - kz[n]*a2*_Complex_I) * gz[off1+n]; } } } for (i = 1; i <= li; i++) { for (j = 0; j <= nmax - i; j++) { off0 = (i-1) * NGv + j * dj * NGv; // [i-1,j ] off1 = (i-1) * NGv + (j+1) * dj * NGv; // [i-1,j+1] off2 = i * NGv + j * dj * NGv; // [i ,j ] for (n = 0; n < NGv; n++) { if (gz[n] != 0) { gx[off2+n] = gx[off1+n] + rirj[0] * gx[off0+n]; gy[off2+n] = gy[off1+n] + rirj[1] * gy[off0+n]; gz[off2+n] = gz[off1+n] + rirj[2] * gz[off0+n]; } } } } } static void inner_prod(double complex *g, double complex *gout, int *idx, const CINTEnvVars *envs, double *Gv, size_t NGv, int empty) { int ix, iy, iz, n, k; double complex *gz = g + envs->g_size * NGv * 2; if (empty) { for (n = 0; n < envs->nf; n++) { ix = idx[n*3+0]; iy = idx[n*3+1]; iz = idx[n*3+2]; for (k = 0; k 
< NGv; k++) { if (gz[k] != 0) { gout[n*NGv+k] = g[ix*NGv+k] * g[iy*NGv+k] * g[iz*NGv+k]; } else { gout[n*NGv+k] = 0; } } } } else { for (n = 0; n < envs->nf; n++) { ix = idx[n*3+0]; iy = idx[n*3+1]; iz = idx[n*3+2]; for (k = 0; k < NGv; k++) { if (gz[k] != 0) { gout[n*NGv+k] += g[ix*NGv+k] * g[iy*NGv+k] * g[iz*NGv+k]; } } } } } static void prim_to_ctr(double complex *gc, const size_t nf, double complex *gp, const int nprim, const int nctr, const double *coeff, int empty) { size_t n, i; double c; if (empty) { for (n = 0; n < nctr; n++) { c = coeff[nprim*n]; for (i = 0; i < nf; i++) { gc[i] = gp[i] * c; } gc += nf; } } else { for (n = 0; n < nctr; n++) { c = coeff[nprim*n]; if (c != 0) { for (i = 0; i < nf; i++) { gc[i] += gp[i] * c; } } gc += nf; } } } static void transpose(double complex *out, double complex *in, int nf, int comp, size_t NGv) { size_t n, k, ic; double complex *pin; for (ic = 0; ic < comp; ic++) { for (n = 0; n < nf; n++) { pin = in + (n*comp+ic) * NGv; for (k = 0; k < NGv; k++) { out[n*NGv+k] = pin[k]; } } out += nf * NGv; } } static const int _GBUFSIZE[] = { 1, 4, 10, 10, 20, 48, 20, 35, 75, 150, 35, 56, 108, 216, 384, 56, 84, 147, 294, 510, 850, 84, 120, 192, 384, 654, 1090, 1640, 120, 165, 243, 486, 816, 1360, 2040, 3030 }; #define bufsize(i,j) _GBUFSIZE[((i>=j) ? 
(i*(i+1)/2+j) : (j*(j+1)/2+i))] int GTO_aopair_early_contract(double complex *out, CINTEnvVars *envs, FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv) { const int *shls = envs->shls; const int *bas = envs->bas; const double *env = envs->env; const int i_sh = shls[0]; const int j_sh = shls[1]; const int i_l = envs->i_l; const int j_l = envs->j_l; const int i_ctr = envs->x_ctr[0]; const int j_ctr = envs->x_ctr[1]; const int i_prim = bas(NPRIM_OF, i_sh); const int j_prim = bas(NPRIM_OF, j_sh); const int nf = envs->nf; const double *ri = envs->ri; const double *rj = envs->rj; const double *ai = env + bas(PTR_EXP, i_sh); const double *aj = env + bas(PTR_EXP, j_sh); const double *ci = env + bas(PTR_COEFF, i_sh); const double *cj = env + bas(PTR_COEFF, j_sh); double fac1i, fac1j; double aij, dij, eij; int ip, jp, n; int empty[2] = {1, 1}; int *jempty = empty + 0; int *iempty = empty + 1; const size_t len1 = bufsize(i_l,j_l) * NGv; const size_t leni = len1 * i_ctr; const size_t lenj = len1 * i_ctr * j_ctr; double complex *gctrj = malloc(sizeof(double complex)*(lenj+leni+len1)); double complex *g = gctrj + lenj; double complex *gctri, *g1d; if (j_ctr == 1) { gctri = gctrj; iempty = jempty; } else { gctri = g; g += leni; } g1d = g; void (*aopair_rr)(); int offset_g1d; if (i_l >= j_l) { aopair_rr = aopair_rr_igtj_early; offset_g1d = _CUM_LEN_CART[i_l] - _LEN_CART[i_l]; } else { aopair_rr = aopair_rr_iltj_early; offset_g1d = _CUM_LEN_CART[j_l] - _LEN_CART[j_l]; } int len_g1d = _CUM_LEN_CART[i_l+j_l] - offset_g1d; double rrij = CINTsquare_dist(ri, rj); double fac1 = SQRTPI * M_PI * CINTcommon_fac_sp(i_l) * CINTcommon_fac_sp(j_l); *jempty = 1; for (jp = 0; jp < j_prim; jp++) { if (j_ctr == 1) { fac1j = fac1 * cj[jp]; } else { fac1j = fac1; *iempty = 1; } for (ip = 0; ip < i_prim; ip++) { aij = ai[ip] + aj[jp]; eij = (ai[ip] * aj[jp] / aij) * rrij; if (eij > EXPCUTOFF) { continue; } dij = exp(-eij) / (aij * sqrt(aij)); fac1i = fac1j 
* dij; (*aopair_rr)(g, ai[ip], aj[jp], envs, eval_gz, fac*fac1i, Gv, b, gxyz, gs, NGv); prim_to_ctr(gctri, len_g1d*NGv, g1d+offset_g1d*NGv, i_prim, i_ctr, ci+ip, *iempty); *iempty = 0; } if (!*iempty) { if (j_ctr > 1) { prim_to_ctr(gctrj, i_ctr*len_g1d*NGv, gctri, j_prim,j_ctr, cj+jp, *jempty); } *jempty = 0; } } if (!*jempty) { g1d = gctrj; for (n = 0; n < i_ctr*j_ctr; n++) { if (i_l >= j_l) { vrr2d_withGv(out+n*nf*NGv, g1d, gctrj+lenj, envs->li_ceil, envs->lj_ceil, ri, rj, NGv); } else { hrr2d_withGv(out+n*nf*NGv, g1d, gctrj+lenj, envs->li_ceil, envs->lj_ceil, ri, rj, NGv); } g1d += len_g1d * NGv; } } free(gctrj); return !*jempty; } int GTO_aopair_lazy_contract(double complex *gctr, CINTEnvVars *envs, FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv) { const int *shls = envs->shls; const int *bas = envs->bas; const double *env = envs->env; const int i_sh = shls[0]; const int j_sh = shls[1]; const int i_l = envs->i_l; const int j_l = envs->j_l; const int i_ctr = envs->x_ctr[0]; const int j_ctr = envs->x_ctr[1]; const int i_prim = bas(NPRIM_OF, i_sh); const int j_prim = bas(NPRIM_OF, j_sh); const int n_comp = envs->ncomp_e1 * envs->ncomp_tensor; const int nf = envs->nf; const double *ri = envs->ri; const double *rj = envs->rj; const double *ai = env + bas(PTR_EXP, i_sh); const double *aj = env + bas(PTR_EXP, j_sh); const double *ci = env + bas(PTR_COEFF, i_sh); const double *cj = env + bas(PTR_COEFF, j_sh); double fac1i, fac1j; double aij, dij, eij; int ip, jp; int empty[3] = {1, 1, 1}; int *jempty = empty + 0; int *iempty = empty + 1; int *gempty = empty + 2; const size_t len1 = envs->g_size * 3 * (1<<envs->gbits) * NGv; const size_t leng = nf * n_comp * NGv; const size_t leni = nf * i_ctr * n_comp * NGv; size_t lenj = 0; if (n_comp > 1) { lenj = nf * i_ctr * j_ctr * n_comp * NGv; } double complex *g = malloc(sizeof(double complex) * (len1+leng+leni+lenj)); double complex *g1 = g + len1; double complex *gout, *gctri, 
*gctrj; if (n_comp == 1) { gctrj = gctr; } else { gctrj = g1; g1 += lenj; } if (j_ctr == 1) { gctri = gctrj; iempty = jempty; } else { gctri = g1; g1 += leni; } if (i_ctr == 1) { gout = gctri; gempty = iempty; } else { gout = g1; } void (*aopair_rr)(); if (i_l >= j_l) { aopair_rr = aopair_rr_igtj_lazy; } else { aopair_rr = aopair_rr_iltj_lazy; } int *idx = malloc(sizeof(int) * nf * 3); _g2c_index_xyz(idx, envs); double rrij = CINTsquare_dist(ri, rj); double fac1 = SQRTPI * M_PI * CINTcommon_fac_sp(i_l) * CINTcommon_fac_sp(j_l); *jempty = 1; for (jp = 0; jp < j_prim; jp++) { envs->aj = aj[jp]; if (j_ctr == 1) { fac1j = fac1 * cj[jp]; } else { fac1j = fac1; *iempty = 1; } for (ip = 0; ip < i_prim; ip++) { envs->ai = ai[ip]; aij = ai[ip] + aj[jp]; eij = (ai[ip] * aj[jp] / aij) * rrij; if (eij > EXPCUTOFF) { continue; } dij = exp(-eij) / (aij * sqrt(aij)); if (i_ctr == 1) { fac1i = fac1j * dij * ci[ip]; } else { fac1i = fac1j * dij; } (*aopair_rr)(g, ai[ip], aj[jp], envs, eval_gz, fac*fac1i, Gv, b, gxyz, gs, NGv); (*envs->f_gout)(g, gout, idx, envs, Gv, NGv, *gempty); if (i_ctr > 1) { prim_to_ctr(gctri, nf*n_comp*NGv, gout, i_prim, i_ctr, ci+ip, *iempty); } *iempty = 0; } if (!*iempty) { if (j_ctr > 1) { prim_to_ctr(gctrj, i_ctr*nf*n_comp*NGv, gctri, j_prim, j_ctr, cj+jp, *jempty); } *jempty = 0; } } if (n_comp > 1 && !*jempty) { transpose(gctr, gctrj, nf*i_ctr*j_ctr, n_comp, NGv); } free(g); free(idx); return !*jempty; } void GTO_Gv_general(double complex *out, double aij, double *rij, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv) { double *kx = Gv; double *ky = kx + NGv; double *kz = ky + NGv; const double cutoff = EXPCUTOFF * aij * 4; int n; double kR, kk; for (n = 0; n < NGv; n++) { kk = kx[n] * kx[n] + ky[n] * ky[n] + kz[n] * kz[n]; if (kk < cutoff) { kR = kx[n] * rij[0] + ky[n] * rij[1] + kz[n] * rij[2]; out[n] = exp(-.25*kk/aij) * fac * (cos(kR) - sin(kR)*_Complex_I); } else { out[n] = 0; } } } /* * Gv = dot(b.T,gxyz) + kpt * kk = 
dot(Gv, Gv) * kr = dot(rij, Gv) = dot(rij,b.T, gxyz) + dot(rij,kpt) = dot(br, gxyz) + dot(rij,kpt) * out = fac * exp(-.25 * kk / aij) * (cos(kr) - sin(kr) * _Complex_I); * * b: the first 9 elements are 2\pi*inv(a^T), then 3 elements for k_{ij}, * followed by 3*NGv floats for Gbase */ void GTO_Gv_orth(double complex *out, double aij, double *rij, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv) { const int nx = gs[0]; const int ny = gs[1]; const int nz = gs[2]; double br[3]; // dot(rij, b) br[0] = rij[0] * b[0]; br[1] = rij[1] * b[4]; br[2] = rij[2] * b[8]; double *kpt = b + 9; double kr[3]; kr[0] = rij[0] * kpt[0]; kr[1] = rij[1] * kpt[1]; kr[2] = rij[2] * kpt[2]; double *Gxbase = b + 12; double *Gybase = Gxbase + nx; double *Gzbase = Gybase + ny; double *kx = Gv; double *ky = kx + NGv; double *kz = ky + NGv; double complex zbuf[nx+ny+nz]; double complex *csx = zbuf; double complex *csy = csx + nx; double complex *csz = csy + ny; double kkpool[nx+ny+nz]; double *kkx = kkpool; double *kky = kkx + nx; double *kkz = kky + ny; int *gx = gxyz; int *gy = gx + NGv; int *gz = gy + NGv; const double cutoff = EXPCUTOFF * aij * 4; int n, ix, iy, iz; double Gr; for (n = 0; n < nx+ny+nz; n++) { kkpool[n] = -1; } for (n = 0; n < NGv; n++) { ix = gx[n]; iy = gy[n]; iz = gz[n]; if (kkx[ix] < 0) { Gr = Gxbase[ix] * br[0] + kr[0]; kkx[ix] = .25 * kx[n]*kx[n] / aij; csx[ix] = exp(-kkx[ix]) * (cos(Gr)-sin(Gr)*_Complex_I); } if (kky[iy] < 0) { Gr = Gybase[iy] * br[1] + kr[1]; kky[iy] = .25 * ky[n]*ky[n] / aij; csy[iy] = exp(-kky[iy]) * (cos(Gr)-sin(Gr)*_Complex_I); } if (kkz[iz] < 0) { Gr = Gzbase[iz] * br[2] + kr[2]; kkz[iz] = .25 * kz[n]*kz[n] / aij; csz[iz] = fac * exp(-kkz[iz]) * (cos(Gr)-sin(Gr)*_Complex_I); } if (kkx[ix] + kky[iy] + kkz[iz] < cutoff) { out[n] = csx[ix] * csy[iy] * csz[iz]; } else { out[n] = 0; } } } void GTO_Gv_nonorth(double complex *out, double aij, double *rij, double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv) 
{ const int nx = gs[0]; const int ny = gs[1]; const int nz = gs[2]; double br[3]; // dot(rij, b) br[0] = rij[0] * b[0]; br[0] += rij[1] * b[1]; br[0] += rij[2] * b[2]; br[1] = rij[0] * b[3]; br[1] += rij[1] * b[4]; br[1] += rij[2] * b[5]; br[2] = rij[0] * b[6]; br[2] += rij[1] * b[7]; br[2] += rij[2] * b[8]; double *kpt = b + 9; double kr[3]; kr[0] = rij[0] * kpt[0]; kr[1] = rij[1] * kpt[1]; kr[2] = rij[2] * kpt[2]; double *Gxbase = b + 12; double *Gybase = Gxbase + nx; double *Gzbase = Gybase + ny; double *kx = Gv; double *ky = kx + NGv; double *kz = ky + NGv; double complex zbuf[nx+ny+nz]; double complex *csx = zbuf; double complex *csy = csx + nx; double complex *csz = csy + ny; char empty[nx+ny+nz]; char *xempty = empty; char *yempty = xempty + nx; char *zempty = yempty + ny; memset(empty, 1, sizeof(char)*(nx+ny+nz)); int *gx = gxyz; int *gy = gx + NGv; int *gz = gy + NGv; const double cutoff = EXPCUTOFF * aij * 4; int n, ix, iy, iz; double Gr, kk; for (n = 0; n < NGv; n++) { ix = gx[n]; iy = gy[n]; iz = gz[n]; kk = kx[n] * kx[n] + ky[n] * ky[n] + kz[n] * kz[n]; if (kk < cutoff) { ix = gx[n]; iy = gy[n]; iz = gz[n]; if (xempty[ix]) { Gr = Gxbase[ix] * br[0] + kr[0]; csx[ix] = cos(Gr)-sin(Gr)*_Complex_I; xempty[ix] = 0; } if (yempty[iy]) { Gr = Gybase[iy] * br[1] + kr[1]; csy[iy] = cos(Gr)-sin(Gr)*_Complex_I; yempty[iy] = 0; } if (zempty[iz]) { Gr = Gzbase[iz] * br[2] + kr[2]; csz[iz] = fac * (cos(Gr)-sin(Gr)*_Complex_I); zempty[iz] = 0; } out[n] = exp(-.25*kk/aij) * csx[ix]*csy[iy]*csz[iz]; } else { out[n] = 0; } } } static void zcopy_ij(double complex *out, const double complex *gctr, const int mi, const int mj, const int ni, const size_t NGv) { int i, j, k; for (j = 0; j < mj; j++) { for (i = 0; i < mi; i++) { for (k = 0; k < NGv; k++) { out[i*NGv+k] = gctr[i*NGv+k]; } } out += ni * NGv; gctr += mi * NGv; } } void GTO_ft_c2s_cart(double complex *out, double complex *gctr, int *dims, CINTEnvVars *envs, size_t NGv) { const int i_ctr = envs->x_ctr[0]; const int 
j_ctr = envs->x_ctr[1]; const int nfi = envs->nfi; const int nfj = envs->nfj; const int ni = nfi*i_ctr; const int nj = nfj*j_ctr; const int nf = envs->nf; int ic, jc; double complex *pout; for (jc = 0; jc < nj; jc += nfj) { for (ic = 0; ic < ni; ic += nfi) { pout = out + (dims[0] * jc + ic) * NGv; zcopy_ij(pout, gctr, nfi, nfj, dims[0], NGv); gctr += nf * NGv; } } } #define C2S(sph, nket, cart, l) \ (double complex *)CINTc2s_ket_sph((double *)(sph), nket, (double *)(cart), l) #define OF_CMPLX 2 void GTO_ft_c2s_sph(double complex *out, double complex *gctr, int *dims, CINTEnvVars *envs, size_t NGv) { const int i_l = envs->i_l; const int j_l = envs->j_l; const int i_ctr = envs->x_ctr[0]; const int j_ctr = envs->x_ctr[1]; const int di = i_l * 2 + 1; const int dj = j_l * 2 + 1; const int ni = di*i_ctr; const int nj = dj*j_ctr; const int nfi = envs->nfi; const int nf = envs->nf; int ic, jc, k; const int buflen = nfi*dj; double complex *buf1 = malloc(sizeof(double complex) * buflen*2 * NGv); double complex *buf2 = buf1 + buflen * NGv; double complex *pout, *pij, *buf; for (jc = 0; jc < nj; jc += dj) { for (ic = 0; ic < ni; ic += di) { buf = C2S(buf1, nfi*NGv*OF_CMPLX, gctr, j_l); pij = C2S(buf2, NGv*OF_CMPLX, buf, i_l); for (k = NGv; k < dj*NGv; k+=NGv) { pout = C2S(buf2+k*di, NGv*OF_CMPLX, buf+k*nfi, i_l); } pout = out + (dims[0] * jc + ic) * NGv; zcopy_ij(pout, pij, di, dj, dims[0], NGv); gctr += nf * NGv; } } free(buf1); } static void _ft_zset0(double complex *out, int *dims, int *counts, int comp, size_t NGv) { double complex *pout; int i, j, k, ic; for (ic = 0; ic < comp; ic++) { for (j = 0; j < counts[1]; j++) { pout = out + j * dims[0] * NGv; for (i = 0; i < counts[0]; i++) { for (k = 0; k < NGv; k++) { pout[i*NGv+k] = 0; } } } out += dims[0] * dims[1] * NGv; } } /************************************************* * * eval_aopair is one of GTO_aopair_early_contract, * GTO_aopair_lazy_contract * * eval_gz is one of GTO_Gv_general, GTO_Gv_uniform_orth, * 
GTO_Gv_uniform_nonorth, GTO_Gv_nonuniform_orth * *************************************************/ int GTO_ft_aopair_drv(double complex *out, int *dims, int (*eval_aopair)(), FPtr_eval_gz eval_gz, void (*f_c2s)(), double complex fac, double *Gv, double *b, int *gxyz, int *gs, size_t NGv, CINTEnvVars *envs) { const int i_ctr = envs->x_ctr[0]; const int j_ctr = envs->x_ctr[1]; const int n_comp = envs->ncomp_e1 * envs->ncomp_tensor; const size_t nc = envs->nf * i_ctr * j_ctr * NGv; double complex *gctr = malloc(sizeof(double complex) * nc * n_comp); if (eval_gz == NULL) { eval_gz = GTO_Gv_general; } if (eval_gz != GTO_Gv_general) { assert(gxyz != NULL); } if (eval_aopair == NULL) { const int *shls = envs->shls; const int *bas = envs->bas; const int i_sh = shls[0]; const int j_sh = shls[1]; const int i_prim = bas(NPRIM_OF, i_sh); const int j_prim = bas(NPRIM_OF, j_sh); if (i_prim*j_prim < i_ctr*j_ctr*3) { eval_aopair = GTO_aopair_lazy_contract; } else { eval_aopair = GTO_aopair_early_contract; } } int has_value = (*eval_aopair)(gctr, envs, eval_gz, fac, Gv, b, gxyz, gs, NGv); int counts[4]; if (f_c2s == &GTO_ft_c2s_sph) { counts[0] = (envs->i_l*2+1) * i_ctr; counts[1] = (envs->j_l*2+1) * j_ctr; } else { // f_c2s == &GTO_ft_c2s_cart counts[0] = envs->nfi * i_ctr; counts[1] = envs->nfj * j_ctr; } if (dims == NULL) { dims = counts; } size_t nout = dims[0] * dims[1] * NGv; int n; if (has_value) { for (n = 0; n < n_comp; n++) { (*f_c2s)(out+nout*n, gctr+nc*n, dims, envs, NGv); } } else { _ft_zset0(out, dims, counts, n_comp, NGv); } free(gctr); return has_value; } int GTO_ft_ovlp_cart(double complex *out, int *shls, int *dims, int (*eval_aopair)(), FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { CINTEnvVars envs; int ng[] = {0, 0, 0, 0, 0, 1, 0, 1}; GTO_ft_init1e_envs(&envs, ng, shls, atm, natm, bas, nbas, env); envs.f_gout = &inner_prod; return GTO_ft_aopair_drv(out, dims, 
eval_aopair, eval_gz, &GTO_ft_c2s_cart, fac, Gv, b, gxyz, gs, nGv, &envs); } int GTO_ft_ovlp_sph(double complex *out, int *shls, int *dims, int (*eval_aopair)(), FPtr_eval_gz eval_gz, double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { CINTEnvVars envs; int ng[] = {0, 0, 0, 0, 0, 1, 0, 1}; GTO_ft_init1e_envs(&envs, ng, shls, atm, natm, bas, nbas, env); envs.f_gout = &inner_prod; return GTO_ft_aopair_drv(out, dims, eval_aopair, eval_gz, &GTO_ft_c2s_sph, fac, Gv, b, gxyz, gs, nGv, &envs); } /************************************************* * *************************************************/ static void zcopy_s2_igtj(double complex *out, double complex *in, size_t NGv, int comp, int nij, int ip, int di, int dj) { const size_t ip1 = ip + 1; int i, j, n, ic; double complex *pin, *pout; for (ic = 0; ic < comp; ic++) { pout = out + ic * nij * NGv; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { pin = in + NGv * (j*di+i); for (n = 0; n < NGv; n++) { pout[j*NGv+n] = pin[n]; } } pout += (ip1 + i) * NGv; } } } static void zcopy_s2_ieqj(double complex *out, double complex *in, size_t NGv, int comp, int nij, int ip, int di, int dj) { const size_t ip1 = ip + 1; int i, j, n, ic; double complex *pin, *pout; for (ic = 0; ic < comp; ic++) { pout = out + ic * nij * NGv; for (i = 0; i < di; i++) { for (j = 0; j <= i; j++) { pin = in + NGv * (j*di+i); for (n = 0; n < NGv; n++) { pout[j*NGv+n] = pin[n]; } } pout += (ip1 + i) * NGv; } } } void GTO_ft_fill_s1(int (*intor)(), int (*eval_aopair)(), FPtr_eval_gz eval_gz, double complex *mat, int comp, int ish, int jsh, double complex *buf, int *shls_slice, int *ao_loc, double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; ish += ish0; jsh += jsh0; const int nrow 
= ao_loc[ish1] - ao_loc[ish0]; const int ncol = ao_loc[jsh1] - ao_loc[jsh0]; const size_t off = ao_loc[ish] - ao_loc[ish0] + (ao_loc[jsh] - ao_loc[jsh0]) * nrow; int shls[2] = {ish, jsh}; int dims[2] = {nrow, ncol}; (*intor)(mat+off*nGv, shls, dims, eval_aopair, eval_gz, fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env); } void GTO_ft_fill_s1hermi(int (*intor)(), int (*eval_aopair)(), FPtr_eval_gz eval_gz, double complex *mat, int comp, int ish, int jsh, double complex *buf, int *shls_slice, int *ao_loc, double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; ish += ish0; jsh += jsh0; const int ip = ao_loc[ish] - ao_loc[ish0]; const int jp = ao_loc[jsh] - ao_loc[jsh0]; if (ip < jp) { return; } const int nrow = ao_loc[ish1] - ao_loc[ish0]; const int ncol = ao_loc[jsh1] - ao_loc[jsh0]; const size_t off = ao_loc[ish] - ao_loc[ish0] + (ao_loc[jsh] - ao_loc[jsh0]) * nrow; const size_t NGv = nGv; int shls[2] = {ish, jsh}; int dims[2] = {nrow, ncol}; (*intor)(mat+off*NGv, shls, dims, eval_aopair, eval_gz, fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env); if (ip != jp && ish0 == jsh0 && ish1 == jsh1) { const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; double complex *in = mat + off * NGv; double complex *out = mat + (ao_loc[jsh] - ao_loc[jsh0] + (ao_loc[ish] - ao_loc[ish0]) * nrow) * NGv; int i, j, n, ic; double complex *pout, *pin; for (ic = 0; ic < comp; ic++) { for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { pin = in + NGv * (j*nrow+i); pout = out + NGv * (i*nrow+j); for (n = 0; n < nGv; n++) { pout[n] = pin[n]; } } } out += nrow * ncol * NGv; } } } void GTO_ft_fill_s2(int (*intor)(), int (*eval_aopair)(), FPtr_eval_gz eval_gz, double complex *mat, int comp, int ish, int jsh, double complex *buf, int *shls_slice, int 
*ao_loc, double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; ish += ish0; jsh += jsh0; const int ip = ao_loc[ish]; const int jp = ao_loc[jsh] - ao_loc[jsh0]; if (ip < jp) { return; } const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; const int i0 = ao_loc[ish0]; const size_t off0 = i0 * (i0 + 1) / 2; const size_t off = ip * (ip + 1) / 2 - off0 + jp; const size_t nij = ao_loc[ish1] * (ao_loc[ish1] + 1) / 2 - off0; const size_t NGv = nGv; int shls[2] = {ish, jsh}; int dims[2] = {di, dj}; (*intor)(buf, shls, dims, eval_aopair, eval_gz, fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env); if (ip != jp) { zcopy_s2_igtj(mat+off*NGv, buf, NGv, comp, nij, ip, di, dj); } else { zcopy_s2_ieqj(mat+off*NGv, buf, NGv, comp, nij, ip, di, dj); } } /* * Fourier transform AO pairs and add to mat (inplace) */ void GTO_ft_fill_drv(int (*intor)(), FPtr_eval_gz eval_gz, void (*fill)(), double complex *mat, int comp, int *shls_slice, int *ao_loc, double phase, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int nish = ish1 - ish0; const int njsh = jsh1 - jsh0; const double complex fac = cos(phase) + sin(phase)*_Complex_I; int (*eval_aopair)() = NULL; if (intor != &GTO_ft_ovlp_cart && intor != &GTO_ft_ovlp_sph) { eval_aopair = &GTO_aopair_lazy_contract; } #pragma omp parallel default(none) \ shared(intor, eval_gz, eval_aopair, fill, mat, comp, shls_slice, \ ao_loc, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env) { int i, j, ij; double complex *buf = malloc(sizeof(double complex) * NCTRMAX*NCTRMAX*comp*(size_t)nGv); #pragma omp for schedule(dynamic) for (ij = 0; ij < nish*njsh; ij++) { i = ij 
/ njsh; j = ij % njsh; (*fill)(intor, eval_aopair, eval_gz, mat, comp, i, j, buf, shls_slice, ao_loc, fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env); } free(buf); } } /* * Given npair of shls in shls_lst, FT their AO pair value and add to * out (inplace) */ void GTO_ft_fill_shls_drv(int (*intor)(), FPtr_eval_gz eval_gz, double complex *out, int comp, int npair, int *shls_lst, int *ao_loc, double phase, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { int n, di, dj, ish, jsh; int *ijloc = malloc(sizeof(int) * npair); ijloc[0] = 0; for (n = 1; n < npair; n++) { ish = shls_lst[n*2-2]; jsh = shls_lst[n*2-1]; di = ao_loc[ish+1] - ao_loc[ish]; dj = ao_loc[jsh+1] - ao_loc[jsh]; ijloc[n] = ijloc[n-1] + di*dj; } const double complex fac = cos(phase) + sin(phase)*_Complex_I; const size_t NGv = nGv; int (*eval_aopair)() = NULL; if (intor != &GTO_ft_ovlp_cart && intor != &GTO_ft_ovlp_sph) { eval_aopair = &GTO_aopair_lazy_contract; } #pragma omp parallel default(none) \ shared(intor, eval_gz, eval_aopair, out, comp, Gv, b, gxyz, gs, \ nGv, npair, shls_lst, ao_loc, \ atm, natm, bas, nbas, env, ijloc) \ private(n) { int ish, jsh; int dims[2]; #pragma omp for schedule(dynamic) for (n = 0; n < npair; n++) { ish = shls_lst[n*2 ]; jsh = shls_lst[n*2+1]; dims[0] = ao_loc[ish+1] - ao_loc[ish]; dims[1] = ao_loc[jsh+1] - ao_loc[jsh]; (*intor)(out+ijloc[n]*comp*NGv, shls_lst+n*2, dims, eval_aopair, eval_gz, fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env); } } free(ijloc); } /* * Reversed vrr2d. 
They are used by numint_uniform_grid.c */

/*
 * Apply one ket-raising step of the two-center recurrence on real (plain
 * double) integrals:
 *
 *      out[i,j] = g[i+1,j-1] + (ri-rj) . g[i,j-1]
 *
 * raising the ket angular momentum from lj-1 to lj.  `g` holds the
 * [li, lj-1] block (row_00 x col_00 doubles) immediately followed by the
 * [li+1, lj-1] block; `out` receives _LEN_CART[li]*_LEN_CART[lj] doubles.
 * `rirj` is the 3-vector ri - rj.  The WHERE*/START* macros (defined
 * earlier in this file) map a cartesian component of angular momentum l
 * to the matching component with one extra x, y or z power.
 */
void GTOplain_vrr2d_ket_inc1(double *out, const double *g, double *rirj,
                             int li, int lj)
{
        if (lj == 0) {
                /* Nothing to raise: pass the [li,0] block through unchanged. */
                memcpy(out, g, sizeof(double)*_LEN_CART[li]);
                return;
        }
        const int row_10 = _LEN_CART[li+1];  /* rows of the [li+1,lj-1] block */
        const int row_00 = _LEN_CART[li  ];  /* rows of the [li,  lj-1] block */
        const int col_00 = _LEN_CART[lj-1];  /* columns of both input blocks */
        const double *g00 = g;                 /* [li,  lj-1] block */
        const double *g10 = g + row_00*col_00; /* [li+1,lj-1] block */
        int i, j;
        const double *p00, *p10;
        double *p01 = out;
        /* Output components whose extra ket power is x. */
        for (j = STARTX_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) {
        for (i = 0; i < row_00; i++) {
                p00 = g00 + (j*row_00+i);
                p10 = g10 + (j*row_10+WHEREX_IF_L_INC1(i));
                p01[i] = p10[0] + rirj[0] * p00[0];
        }
        p01 += row_00;
        }
        /* Output components whose extra ket power is y. */
        for (j = STARTY_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) {
        for (i = 0; i < row_00; i++) {
                p00 = g00 + (j*row_00+i);
                p10 = g10 + (j*row_10+WHEREY_IF_L_INC1(i));
                p01[i] = p10[0] + rirj[1] * p00[0];
        }
        p01 += row_00;
        }
        /* The single remaining component whose extra ket power is z. */
        j = STARTZ_IF_L_DEC1(lj);
        if (j < _LEN_CART[lj-1]) {
                for (i = 0; i < row_00; i++) {
                        p00 = g00 + (j*row_00+i);
                        p10 = g10 + (j*row_10+WHEREZ_IF_L_INC1(i));
                        p01[i] = p10[0] + rirj[2] * p00[0];
                }
        }
}

/*
 * Adjoint (reverse-mode) counterpart of GTOplain_vrr2d_ket_inc1: scatter-
 * accumulate the raised block g01 back onto the two source blocks.  g00 is
 * the [li, lj-1] block and g10 = g00 + row_00*col_00 the [li+1, lj-1]
 * block.  For each raised element:
 *
 *      g10[...] += g01[i]
 *      g00[...] += g01[i] * (ri-rj)[component]
 *
 * Both destination blocks are updated in place (accumulated, not
 * overwritten), so callers must have them initialized.
 */
void GTOreverse_vrr2d_ket_inc1(double *g01, double *g00, double *rirj,
                               int li, int lj)
{
        const int row_10 = _LEN_CART[li+1];
        const int row_00 = _LEN_CART[li  ];
        const int col_00 = _LEN_CART[lj-1];
        double *g10 = g00 + row_00*col_00;  /* [li+1,lj-1] block, behind g00 */
        double *p00, *p10;
        int i, j;
        /* Components raised in x. */
        for (j = STARTX_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) {
        for (i = 0; i < row_00; i++) {
                p00 = g00 + (j*row_00+i);
                p10 = g10 + (j*row_10+WHEREX_IF_L_INC1(i));
                p10[0] += g01[i];
                p00[0] += g01[i] * rirj[0];
        }
        g01 += row_00;
        }
        /* Components raised in y. */
        for (j = STARTY_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) {
        for (i = 0; i < row_00; i++) {
                p00 = g00 + (j*row_00+i);
                p10 = g10 + (j*row_10+WHEREY_IF_L_INC1(i));
                p10[0] += g01[i];
                p00[0] += g01[i] * rirj[1];
        }
        g01 += row_00;
        }
        /* The single component raised in z. */
        j = STARTZ_IF_L_DEC1(lj);
        if (j < _LEN_CART[lj-1]) {
                for (i = 0; i < row_00; i++) {
                        p00 = g00 + (j*row_00+i);
                        p10 = g10 + (j*row_10+WHEREZ_IF_L_INC1(i));
                        p10[0] += g01[i];
                        p00[0] += g01[i] * rirj[2];
                }
        }
}
/* ===== concatenated file boundary: test.c ===== */
#include <stdio.h> #include <omp.h> #pragma omp requires unified_shared_memory #include "../utilities/check.h" #include "../utilities/utilities.h" #define TRIALS (1) #define N (992) #define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;}) #define ZERO(X) ZERO_ARRAY(N, X) int main(void) { check_offloading(); double A[N], B[N], C[N], D[N], E[N]; int fail = 0; INIT(); // ************************** // Series 1: no dist_schedule // ************************** // // Test: #iterations == #teams // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(512) #pragma omp distribute simd for (int i = 0 ; i < 512 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 512 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations > #teams // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) #pragma omp distribute simd for (int i = 0 ; i < 500 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 500 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations < #teams // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) #pragma omp distribute simd for (int i = 0 ; i < 123 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 123 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // **************************** // Series 2: with dist_schedule // **************************** // // Test: #iterations == #teams, dist_schedule(1) // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(512) 
#pragma omp distribute simd dist_schedule(static,1) for (int i = 0 ; i < 512 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 512 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations == #teams, dist_schedule(#iterations) // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(512) #pragma omp distribute simd dist_schedule(static,512) for (int i = 0 ; i < 512 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 512 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations == #teams, dist_schedule(#iterations/10), variable chunk size // ZERO(A); int ten = 10; int chunkSize = 512/ten; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(512) #pragma omp distribute simd dist_schedule(static,chunkSize) for (int i = 0 ; i < 512 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 512 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations > #teams, dist_schedule(1) // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) #pragma omp distribute simd dist_schedule(static,1) for (int i = 0 ; i < 500 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 500 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations > #teams, dist_schedule(#iterations) // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) #pragma omp 
distribute simd dist_schedule(static,500) for (int i = 0 ; i < 500 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 500 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations > #teams, dist_schedule(#iterations/10), variable chunk size // ZERO(A); ten = 10; chunkSize = 500/ten; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) #pragma omp distribute simd dist_schedule(static,chunkSize) for (int i = 0 ; i < 500 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 500 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations < #teams, dist_schedule(1) // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) #pragma omp distribute simd dist_schedule(static,1) for (int i = 0 ; i < 123 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 123 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations < #teams, dist_schedule(#iterations) // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) #pragma omp distribute simd dist_schedule(static,123) for (int i = 0 ; i < 123 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 123 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations < #teams, dist_schedule(#iterations) // ZERO(A); ten = 10; chunkSize = 123/ten; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) #pragma 
omp distribute simd dist_schedule(static,chunkSize) for (int i = 0 ; i < 123 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 123 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // **************************** // Series 3: with ds attributes // **************************** // // Test: private // ZERO(A); ZERO(B); double p = 2.0, q = 4.0; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) { #pragma omp distribute simd private(p,q) for(int i = 0 ; i < N ; i++) { p = 2; q = 3; A[i] += p; B[i] += q; } } } for(int i = 0 ; i < N ; i++) { if (A[i] != TRIALS*2) { printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) TRIALS*2, A[i]); fail = 1; } if (B[i] != TRIALS*3) { printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) TRIALS*3, B[i]); fail = 1; } } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: firstprivate // ZERO(A); ZERO(B); p = 2.0, q = 4.0; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target // implicit firstprivate for p and q, their initial values being 2 and 4 for each target invocation #pragma omp teams num_teams(64) { #pragma omp distribute simd firstprivate(p,q) for(int i = 0 ; i < 128 ; i++) { // 2 iterations for each team p += 3.0; // p and q are firstprivate to the team, and as such incremented twice (2 iterations per team) q += 7.0; A[i] += p; B[i] += q; } } } for(int i = 0 ; i < 128 ; i++) { if (i % 2 == 0) { if (A[i] != (2.0+3.0)*TRIALS) { printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0)*TRIALS, A[i]); fail = 1; } if (B[i] != (4.0+7.0)*TRIALS) { printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) (4.0+7.0)*TRIALS, B[i]); fail = 1; } } else { if (A[i] != (2.0+3.0*2)*TRIALS) { printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0*2)*TRIALS, A[i]); fail = 1; } if (B[i] != (4.0+7.0*2)*TRIALS) { printf("Error at B[%d], 
h = %lf, d = %lf\n", i, (double) (4.0+7.0*2)*TRIALS, B[i]); fail = 1; } } } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: lastprivate // int lastpriv = -1; #pragma omp target map(tofrom:lastpriv) #pragma omp teams num_teams(10) #pragma omp distribute simd lastprivate(lastpriv) for(int i = 0 ; i < omp_get_num_teams() ; i++) lastpriv = omp_get_team_num(); if(lastpriv != 9) { printf("lastpriv value is %d and should have been %d\n", lastpriv, 9); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // ************************** // Series 4: collapse // ************************** // // Test: 2 loops // double * S = malloc(N*N*sizeof(double)); double * T = malloc(N*N*sizeof(double)); double * U = malloc(N*N*sizeof(double)); for (int i = 0 ; i < N ; i++) for (int j = 0 ; j < N ; j++) { S[i*N+j] = 0.0; T[i*N+j] = 1.0; U[i*N+j] = 2.0; } for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target map(tofrom:S[:N*N]), map(to:T[:N*N],U[:N*N]) #pragma omp teams num_teams(512) #pragma omp distribute simd collapse(2) for (int i = 0 ; i < N ; i++) for (int j = 0 ; j < N ; j++) S[i*N+j] += T[i*N+j] + U[i*N+j]; // += 3 at each t } for (int i = 0 ; i < N ; i++) for (int j = 0 ; j < N ; j++) if (S[i*N+j] != TRIALS*3.0) { printf("Error at (%d,%d), h = %lf, d = %lf\n", i, j, (double) TRIALS*3.0, S[i*N+j]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: 3 loops // int M = N/8; double * V = malloc(M*M*M*sizeof(double)); double * Z = malloc(M*M*M*sizeof(double)); for (int i = 0 ; i < M ; i++) for (int j = 0 ; j < M ; j++) for (int k = 0 ; k < M ; k++) { V[i*M*M+j*M+k] = 2.0; Z[i*M*M+j*M+k] = 3.0; } for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target map(tofrom:V[:M*M*M]), map(to:Z[:M*M*M]) #pragma omp teams num_teams(512) #pragma omp distribute simd collapse(3) for (int i = 0 ; i < M ; i++) for (int j = 0 ; j < M ; j++) for (int k = 0 ; k < M ; k++) V[i*M*M+j*M+k] += Z[i*M*M+j*M+k]; // += 3 at each t } for (int i = 0 
; i < M ; i++) for (int j = 0 ; j < M ; j++) for (int k = 0 ; k < M ; k++) if (V[i*M*M+j*M+k] != 2.0+TRIALS*3.0) { printf("Error at (%d,%d), h = %lf, d = %lf\n", i, j, (double) TRIALS*3.0, V[i*M*M+j*M+k]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); return 0; }
c-omp.c
/* Modula-3: modified */

/* This file contains routines to construct GNU OpenMP constructs, called
   from parsing in the C and C++ front ends.

   Copyright (C) 2005, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>,
   Diego Novillo <dnovillo@redhat.com>.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "function.h"
#include "c-common.h"
#include "toplev.h"
#include "gimple.h"
#include "bitmap.h"
#include "langhooks.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Complete a #pragma omp master construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_master (location_t loc, tree stmt)
{
  tree t = add_stmt (build1 (OMP_MASTER, void_type_node, stmt));
  SET_EXPR_LOCATION (t, loc);
  return t;
}

/* Complete a #pragma omp critical construct.  STMT is the structured-block
   that follows the pragma, NAME is the identifier in the pragma, or null
   if it was omitted.  LOC is the location of the #pragma.  */

tree
c_finish_omp_critical (location_t loc, tree body, tree name)
{
  tree stmt = make_node (OMP_CRITICAL);
  TREE_TYPE (stmt) = void_type_node;
  OMP_CRITICAL_BODY (stmt) = body;
  OMP_CRITICAL_NAME (stmt) = name;
  SET_EXPR_LOCATION (stmt, loc);
  return add_stmt (stmt);
}

/* Complete a #pragma omp ordered construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_ordered (location_t loc, tree stmt)
{
  tree t = build1 (OMP_ORDERED, void_type_node, stmt);
  SET_EXPR_LOCATION (t, loc);
  return add_stmt (t);
}

/* Complete a #pragma omp barrier construct.  LOC is the location of
   the #pragma.  Emitted as a call to the GOMP runtime builtin.  */

void
c_finish_omp_barrier (location_t loc)
{
  tree x;

  x = built_in_decls[BUILT_IN_GOMP_BARRIER];
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}

/* Complete a #pragma omp taskwait construct.  LOC is the location of the
   pragma.  Emitted as a call to the GOMP runtime builtin.  */

void
c_finish_omp_taskwait (location_t loc)
{
  tree x;

  x = built_in_decls[BUILT_IN_GOMP_TASKWAIT];
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}

/* Complete a #pragma omp atomic construct.  The expression to be
   implemented atomically is LHS code= RHS.  LOC is the location of the
   atomic statement.  The value returned is either error_mark_node (if
   the construct was erroneous) or an OMP_ATOMIC node which should be
   added to the current statement tree with add_stmt.  */

tree
c_finish_omp_atomic (location_t loc, enum tree_code code, tree lhs, tree rhs)
{
  tree x, type, addr;

  if (lhs == error_mark_node || rhs == error_mark_node)
    return error_mark_node;

  /* ??? According to one reading of the OpenMP spec, complex type are
     supported, but there are no atomic stores for any architecture.
     But at least icc 9.0 doesn't support complex types here either.
     And lets not even talk about vector types...  */
  type = TREE_TYPE (lhs);
  if (!INTEGRAL_TYPE_P (type)
      && !POINTER_TYPE_P (type)
      && !SCALAR_FLOAT_TYPE_P (type))
    {
      error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
      return error_mark_node;
    }

  /* ??? Validate that rhs does not overlap lhs.  */

  /* Take and save the address of the lhs.  From then on we'll reference it
     via indirection.  */
  addr = build_unary_op (loc, ADDR_EXPR, lhs, 0);
  if (addr == error_mark_node)
    return error_mark_node;
  addr = save_expr (addr);
  if (TREE_CODE (addr) != SAVE_EXPR
      && (TREE_CODE (addr) != ADDR_EXPR
          || TREE_CODE (TREE_OPERAND (addr, 0)) != VAR_DECL))
    {
      /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
         it even after unsharing function body.  */
      tree var = create_tmp_var_raw (TREE_TYPE (addr), NULL);
      DECL_CONTEXT (var) = current_function_decl;
      addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
    }
  lhs = build_indirect_ref (loc, addr, RO_NULL);

  /* There are lots of warnings, errors, and conversions that need to happen
     in the course of interpreting a statement.  Use the normal mechanisms
     to do this, and then take it apart again.  */
  x = build_modify_expr (input_location, lhs, NULL_TREE, code,
                         input_location, rhs, NULL_TREE);
  if (x == error_mark_node)
    return error_mark_node;
  gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
  rhs = TREE_OPERAND (x, 1);

  /* Punt the actual generation of atomic operations to common code.  */
  x = build2 (OMP_ATOMIC, void_type_node, addr, rhs);
  SET_EXPR_LOCATION (x, loc);
  return x;
}

/* Complete a #pragma omp flush construct.  We don't do anything with
   the variable list that the syntax allows.  LOC is the location of
   the #pragma.  */

void
c_finish_omp_flush (location_t loc)
{
  tree x;

  x = built_in_decls[BUILT_IN_SYNCHRONIZE];
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}

/* Check and canonicalize #pragma omp for increment expression.
   Helper function for c_finish_omp_for.  Rewrites EXP (the increment
   amount) so that DECL no longer appears inside it, or returns
   error_mark_node when that is not possible.  */

static tree
check_omp_for_incr_expr (location_t loc, tree exp, tree decl)
{
  tree t;

  if (!INTEGRAL_TYPE_P (TREE_TYPE (exp))
      || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl)))
    return error_mark_node;

  if (exp == decl)
    return build_int_cst (TREE_TYPE (exp), 0);

  switch (TREE_CODE (exp))
    {
    CASE_CONVERT:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_convert_loc (loc, TREE_TYPE (exp), t);
      break;
    case MINUS_EXPR:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, MINUS_EXPR,
                                TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      break;
    case PLUS_EXPR:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, PLUS_EXPR,
                                TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, PLUS_EXPR,
                                TREE_TYPE (exp), TREE_OPERAND (exp, 0), t);
      break;
    default:
      break;
    }

  return error_mark_node;
}

/* Validate and emit code for the OpenMP directive #pragma omp for.
   DECLV is a vector of iteration variables, for each collapsed loop.
   INITV, CONDV and INCRV are vectors containing initialization
   expressions, controlling predicates and increment expressions.
   BODY is the body of the loop and PRE_BODY statements that go before
   the loop.  Returns the OMP_FOR statement, or NULL on error.  */

tree
c_finish_omp_for (location_t locus, tree declv, tree initv, tree condv,
                  tree incrv, tree body, tree pre_body)
{
  location_t elocus;
  bool fail = false;
  int i;

  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));

  for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
    {
      tree decl = TREE_VEC_ELT (declv, i);
      tree init = TREE_VEC_ELT (initv, i);
      tree cond = TREE_VEC_ELT (condv, i);
      tree incr = TREE_VEC_ELT (incrv, i);

      elocus = locus;
      if (EXPR_HAS_LOCATION (init))
        elocus = EXPR_LOCATION (init);

      /* Validate the iteration variable.  */
      if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
          && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE)
        {
          error_at (elocus, "invalid type for iteration variable %qE", decl);
          fail = true;
        }

      /* In the case of "for (int i = 0...)", init will be a decl.  It should
         have a DECL_INITIAL that we can turn into an assignment.  */
      if (init == decl)
        {
          elocus = DECL_SOURCE_LOCATION (decl);
          init = DECL_INITIAL (decl);
          if (init == NULL)
            {
              error_at (elocus, "%qE is not initialized", decl);
              init = integer_zero_node;
              fail = true;
            }

          init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR,
                                    /* FIXME diagnostics: This should
                                       be the location of the INIT.  */
                                    elocus, init, NULL_TREE);
        }
      gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
      gcc_assert (TREE_OPERAND (init, 0) == decl);

      if (cond == NULL_TREE)
        {
          error_at (elocus, "missing controlling predicate");
          fail = true;
        }
      else
        {
          bool cond_ok = false;

          if (EXPR_HAS_LOCATION (cond))
            elocus = EXPR_LOCATION (cond);

          if (TREE_CODE (cond) == LT_EXPR
              || TREE_CODE (cond) == LE_EXPR
              || TREE_CODE (cond) == GT_EXPR
              || TREE_CODE (cond) == GE_EXPR
              || TREE_CODE (cond) == NE_EXPR
              || TREE_CODE (cond) == EQ_EXPR)
            {
              tree op0 = TREE_OPERAND (cond, 0);
              tree op1 = TREE_OPERAND (cond, 1);

              /* 2.5.1.  The comparison in the condition is computed in
                 the type of DECL, otherwise the behavior is undefined.

                 For example:
                 long n; int i;
                 i < n;

                 according to ISO will be evaluated as:
                 (long)i < n;

                 We want to force:
                 i < (int)n;  */
              if (TREE_CODE (op0) == NOP_EXPR
                  && decl == TREE_OPERAND (op0, 0))
                {
                  TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0);
                  TREE_OPERAND (cond, 1)
                    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
                                       TREE_OPERAND (cond, 1));
                }
              else if (TREE_CODE (op1) == NOP_EXPR
                       && decl == TREE_OPERAND (op1, 0))
                {
                  TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
                  TREE_OPERAND (cond, 0)
                    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
                                       TREE_OPERAND (cond, 0));
                }

              /* Canonicalize so that DECL is the left-hand operand.  */
              if (decl == TREE_OPERAND (cond, 0))
                cond_ok = true;
              else if (decl == TREE_OPERAND (cond, 1))
                {
                  TREE_SET_CODE (cond,
                                 swap_tree_comparison (TREE_CODE (cond)));
                  TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
                  TREE_OPERAND (cond, 0) = decl;
                  cond_ok = true;
                }

              /* NE/EQ are only acceptable when they can be rewritten as an
                 ordered comparison against the type's extreme value.  */
              if (TREE_CODE (cond) == NE_EXPR
                  || TREE_CODE (cond) == EQ_EXPR)
                {
                  if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)))
                    cond_ok = false;
                  else if (operand_equal_p (TREE_OPERAND (cond, 1),
                                            TYPE_MIN_VALUE (TREE_TYPE (decl)),
                                            0))
                    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
                                         ? GT_EXPR : LE_EXPR);
                  else if (operand_equal_p (TREE_OPERAND (cond, 1),
                                            TYPE_MAX_VALUE (TREE_TYPE (decl)),
                                            0))
                    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
                                         ? LT_EXPR : GE_EXPR);
                  else
                    cond_ok = false;
                }
            }

          if (!cond_ok)
            {
              error_at (elocus, "invalid controlling predicate");
              fail = true;
            }
        }

      if (incr == NULL_TREE)
        {
          error_at (elocus, "missing increment expression");
          fail = true;
        }
      else
        {
          bool incr_ok = false;

          if (EXPR_HAS_LOCATION (incr))
            elocus = EXPR_LOCATION (incr);

          /* Check all the valid increment expressions: v++, v--, ++v, --v,
             v = v + incr, v = incr + v and v = v - incr.  */
          switch (TREE_CODE (incr))
            {
            case POSTINCREMENT_EXPR:
            case PREINCREMENT_EXPR:
            case POSTDECREMENT_EXPR:
            case PREDECREMENT_EXPR:
              if (TREE_OPERAND (incr, 0) != decl)
                break;

              incr_ok = true;
              /* Pointer increments are rewritten as explicit
                 POINTER_PLUS_EXPR assignments.  */
              if (POINTER_TYPE_P (TREE_TYPE (decl))
                  && TREE_OPERAND (incr, 1))
                {
                  tree t = fold_convert_loc (elocus,
                                             sizetype, TREE_OPERAND (incr, 1));

                  if (TREE_CODE (incr) == POSTDECREMENT_EXPR
                      || TREE_CODE (incr) == PREDECREMENT_EXPR)
                    t = fold_build1_loc (elocus, NEGATE_EXPR, sizetype, t);
                  t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (decl), decl, t);
                  incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
                }
              break;

            case MODIFY_EXPR:
              if (TREE_OPERAND (incr, 0) != decl)
                break;
              if (TREE_OPERAND (incr, 1) == decl)
                break;
              if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
                  && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
                      || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
                incr_ok = true;
              else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
                        || (TREE_CODE (TREE_OPERAND (incr, 1))
                            == POINTER_PLUS_EXPR))
                       && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
                incr_ok = true;
              else
                {
                  /* Last resort: try to rewrite the rhs so DECL does not
                     appear in the increment amount.  */
                  tree t = check_omp_for_incr_expr (elocus,
                                                    TREE_OPERAND (incr, 1),
                                                    decl);
                  if (t != error_mark_node)
                    {
                      incr_ok = true;
                      t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
                      incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
                    }
                }
              break;

            default:
              break;
            }
          if (!incr_ok)
            {
              error_at (elocus, "invalid increment expression");
              fail = true;
            }
        }

      TREE_VEC_ELT (initv, i) = init;
      TREE_VEC_ELT (incrv, i) = incr;
    }

  if (fail)
    return NULL;
  else
    {
      tree t = make_node (OMP_FOR);

      TREE_TYPE (t) = void_type_node;
      OMP_FOR_INIT (t) = initv;
      OMP_FOR_COND (t) = condv;
      OMP_FOR_INCR (t) = incrv;
      OMP_FOR_BODY (t) = body;
      OMP_FOR_PRE_BODY (t) = pre_body;

      SET_EXPR_LOCATION (t, locus);
      return add_stmt (t);
    }
}

/* Divide CLAUSES into two lists: those that apply to a parallel construct,
   and those that apply to a work-sharing construct.  Place the results in
   *PAR_CLAUSES and *WS_CLAUSES respectively.  In addition, add a nowait
   clause to the work-sharing list.  LOC is the location of the
   OMP_PARALLEL*.  */

void
c_split_parallel_clauses (location_t loc, tree clauses,
                          tree *par_clauses, tree *ws_clauses)
{
  tree next;

  *par_clauses = NULL;
  *ws_clauses = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);

  for (; clauses ; clauses = next)
    {
      next = OMP_CLAUSE_CHAIN (clauses);

      switch (OMP_CLAUSE_CODE (clauses))
        {
        case OMP_CLAUSE_PRIVATE:
        case OMP_CLAUSE_SHARED:
        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_LASTPRIVATE:
        case OMP_CLAUSE_REDUCTION:
        case OMP_CLAUSE_COPYIN:
        case OMP_CLAUSE_IF:
        case OMP_CLAUSE_NUM_THREADS:
        case OMP_CLAUSE_DEFAULT:
          OMP_CLAUSE_CHAIN (clauses) = *par_clauses;
          *par_clauses = clauses;
          break;

        case OMP_CLAUSE_SCHEDULE:
        case OMP_CLAUSE_ORDERED:
        case OMP_CLAUSE_COLLAPSE:
          OMP_CLAUSE_CHAIN (clauses) = *ws_clauses;
          *ws_clauses = clauses;
          break;

        default:
          gcc_unreachable ();
        }
    }
}

/* True if OpenMP sharing attribute of DECL is predetermined.  */

enum omp_clause_default_kind
c_omp_predetermined_sharing (tree decl)
{
  /* Variables with const-qualified type having no mutable member
     are predetermined shared.  */
  if (TREE_READONLY (decl))
    return OMP_CLAUSE_DEFAULT_SHARED;

  return OMP_CLAUSE_DEFAULT_UNSPECIFIED;
}

#ifdef __cplusplus
} /* extern "C" */
#endif
MBIRModularUtilities3D.c
#include "MBIRModularUtilities3D.h"


/* Forward projection Ax = A*x for the 3-D cone-beam system matrix.
 * Ax is a flat (N_beta, N_dv, N_dw) sinogram array; x is a flat
 * (N_x, N_y, N_z) image array.  Parallel over i_beta: each thread writes
 * a disjoint Ax[i_beta,...] slice, so no race here. */
void forwardProject3DCone( float *Ax, float *x, struct ImageParams *imgParams, struct SysMatrix *A, struct SinoParams *sinoParams)
{
    long int j_u, j_x, j_y, i_beta, i_v, j_z, i_w;
    float B_ij, B_ij_times_x_j;

    setFloatArray2Value( &Ax[0], sinoParams->N_beta*sinoParams->N_dv*sinoParams->N_dw, 0);

    #pragma omp parallel for private(j_x, j_y, j_u, i_v, B_ij, j_z, B_ij_times_x_j, i_w)
    for (i_beta = 0; i_beta <= sinoParams->N_beta-1; ++i_beta)
    {
        for (j_x = 0; j_x <= imgParams->N_x-1; ++j_x)
        {
            for (j_y = 0; j_y <= imgParams->N_y-1; ++j_y)
            {
                j_u = A->j_u[j_x][j_y][i_beta];
                for (i_v = A->i_vstart[j_x][j_y][i_beta]; i_v < A->i_vstart[j_x][j_y][i_beta]+A->i_vstride[j_x][j_y][i_beta] ; ++i_v)
                {
                    /* B holds the in-plane (v) footprint, C the axial (w) footprint */
                    B_ij = A->B_ij_scaler * A->B[j_x][j_y][i_beta*A->i_vstride_max + i_v-A->i_vstart[j_x][j_y][i_beta]];
                    for (j_z = 0; j_z <= imgParams->N_z-1; ++j_z)
                    {
                        B_ij_times_x_j = B_ij * x[index_3D(j_x,j_y,j_z,imgParams->N_y,imgParams->N_z)];
                        for (i_w = A->i_wstart[j_u][j_z]; i_w < A->i_wstart[j_u][j_z]+A->i_wstride[j_u][j_z]; ++i_w)
                        {
                            Ax[index_3D(i_beta,i_v,i_w,sinoParams->N_dv,sinoParams->N_dw)] +=
                                B_ij_times_x_j * A->C_ij_scaler * A->C[j_u][j_z*A->i_wstride_max + i_w-A->i_wstart[j_u][j_z]];
                        }
                    }
                }
            }
        }
    }
}

/* Backprojection-like operation x_out = f(A^T, y_in), restricted to the
 * elliptical image mask.
 *   mode 0: plain backprojection   x_out = A^T y
 *   mode 1: entropy of p = A^T y per voxel (in bits, after normalization)
 *   mode 2: "kappa"                x_out = A^T(y) weighted by A_ij^2
 */
void backProjectlike3DCone( float ***x_out, float ***y_in, struct ImageParams *imgParams, struct SysMatrix *A, struct SinoParams *sinoParams, char mode)
{
    long int j_u, j_x, j_y, i_beta, i_v, j_z, i_w;
    float B_ij, A_ij;
    float ticToc;
    float ***normalization = NULL;
    float val, val2;

    tic(&ticToc);

    #pragma omp parallel for private(j_y, j_z)
    for (j_x = 0; j_x <= imgParams->N_x-1; ++j_x)
    for (j_y = 0; j_y <= imgParams->N_y-1 ; ++j_y)
    for (j_z = 0; j_z <= imgParams->N_z-1; ++j_z)
    {
        x_out[j_x][j_y][j_z] = 0;
    }

    if (mode == 1)
    {
        /* BUGFIX: "normalization" was accumulated into and freed without ever
         * being allocated or zeroed (uninitialized pointer dereference).
         * Allocate and zero it here; it is freed after the final pass below. */
        normalization = (float***)multialloc(sizeof(float), 3, imgParams->N_x, imgParams->N_y, imgParams->N_z);
        for (j_x = 0; j_x <= imgParams->N_x-1; ++j_x)
        for (j_y = 0; j_y <= imgParams->N_y-1; ++j_y)
        for (j_z = 0; j_z <= imgParams->N_z-1; ++j_z)
        {
            normalization[j_x][j_y][j_z] = 0;
        }
    }

    printf("mode: %d\n", mode);
    /* NOTE(review): this loop is parallel over i_beta but accumulates into
     * x_out[j_x][j_y][j_z] (and normalization in mode 1), which are shared
     * across i_beta iterations — a data race.  Left as in the original;
     * confirm whether an atomic update or a loop reordering is intended. */
    #pragma omp parallel for private(j_x, j_y, j_u, i_v, B_ij, j_z, i_w, A_ij, val)
    for (i_beta = 0; i_beta <= sinoParams->N_beta-1; ++i_beta)
    {
        for (j_x = 0; j_x <= imgParams->N_x-1; ++j_x)
        {
            for (j_y = 0; j_y <= imgParams->N_y-1; ++j_y)
            {
                if(isInsideMask(j_x, j_y, imgParams->N_x, imgParams->N_y))
                {
                    j_u = A->j_u[j_x][j_y][i_beta];
                    for (i_v = A->i_vstart[j_x][j_y][i_beta]; i_v < A->i_vstart[j_x][j_y][i_beta]+A->i_vstride[j_x][j_y][i_beta] ; ++i_v)
                    {
                        B_ij = A->B_ij_scaler * A->B[j_x][j_y][i_beta*A->i_vstride_max + i_v-A->i_vstart[j_x][j_y][i_beta]];
                        for (j_z = 0; j_z <= imgParams->N_z-1; ++j_z)
                        {
                            for (i_w = A->i_wstart[j_u][j_z]; i_w < A->i_wstart[j_u][j_z]+A->i_wstride[j_u][j_z]; ++i_w)
                            {
                                A_ij = B_ij * A->C_ij_scaler * A->C[j_u][j_z*A->i_wstride_max + i_w-A->i_wstart[j_u][j_z]];

                                if(mode==0)
                                {
                                    /* normal backprojection */
                                    x_out[j_x][j_y][j_z] += A_ij * y_in[i_beta][i_v][i_w] ;
                                }
                                else if(mode==1)
                                {
                                    /* entropy: accumulate v*log(v) and the normalizer Z = sum v */
                                    val = A_ij * y_in[i_beta][i_v][i_w];
                                    if (val!=0)
                                    {
                                        x_out[j_x][j_y][j_z] += val * log(val) ;
                                    }
                                    normalization[j_x][j_y][j_z] += A_ij * y_in[i_beta][i_v][i_w] ;
                                }
                                else if(mode==2)
                                {
                                    /* kappa: A_ij^2-weighted backprojection */
                                    x_out[j_x][j_y][j_z] += A_ij * y_in[i_beta][i_v][i_w] * A_ij ;
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    printf("mode: %d\n", mode);

    if(mode==1)
    {
        /* compute entropy in bits after normalization:
         * H = ( log(Z) - (1/Z) * sum v*log(v) ) / log(2) */
        #pragma omp parallel for private(j_y, j_z, val, val2)
        for (j_x = 0; j_x <= imgParams->N_x-1; ++j_x)
        {
            for (j_y = 0; j_y <= imgParams->N_y-1; ++j_y)
            {
                for (j_z = 0; j_z <= imgParams->N_z-1; ++j_z)
                {
                    val = x_out[j_x][j_y][j_z];
                    val2 = normalization[j_x][j_y][j_z];
                    if (val2==0)
                    {
                        x_out[j_x][j_y][j_z] = 0;
                    }
                    else
                    {
                        x_out[j_x][j_y][j_z] = (log(val2) - val/val2)/log(2);
                    }
                }
            }
        }
        multifree((void***)normalization, 3);
    }

    toc(&ticToc);
    ticTocDisp(ticToc, "backProjectlike3DCone");
}

/* Normalize the neighbor weights (bFace/bEdge/bVertex, negative = unused)
 * so they sum to 1 over the 26-neighborhood, and derive the zipline
 * partitioning of the z-axis. */
void computeSecondaryReconParams(struct ReconParams *reconParams, struct ImageParams *imgParams)
{
    float sum;
    int N_max, N_z;

    sum = 0;
    sum += reconParams->bFace>=0 ? 6*reconParams->bFace : 0;
    sum += reconParams->bEdge>=0 ? 12*reconParams->bEdge : 0;
    sum += reconParams->bVertex>=0 ? 8*reconParams->bVertex : 0;
    if (sum<=0)
    {
        fprintf(stderr, "Error in computeSecondaryReconParams: at least one neighbor weight needs to be positive\n");
        exit(-1);
    }
    if(reconParams->bFace>=0)
        reconParams->bFace /= sum;
    if(reconParams->bEdge>=0)
        reconParams->bEdge /= sum;
    if(reconParams->bVertex>=0)
        reconParams->bVertex /= sum;

    N_z = imgParams->N_z;
    N_max = reconParams->numVoxelsPerZiplineMax;
    /* balance ziplines: smallest per-zipline length whose count covers N_z */
    reconParams->numVoxelsPerZipline = ceil((float)N_z / ceil((float)N_z/N_max));
    reconParams->numZiplines = ceil((float)N_z / reconParams->numVoxelsPerZipline);
}

/* In-place-free inversion of a 1x1 or 2x2 matrix (closed form).
 * Exits with an error for any larger size. */
void invertDoubleMatrix(float **A, float ** A_inv, int size)
{
    float det;
    if(size == 1)
    {
        A_inv[0][0] = 1.0 / A[0][0];
        return;
    }
    if(size == 2)
    {
        /**
         *      [ a b ]                      1     [  d -b ]
         *  A = [ c d ]   then  A^-1 =   ------- [ -c  a ]
         *                               ad - bc
         */
        det = A[0][0]*A[1][1] - A[0][1]*A[1][0];
        A_inv[0][0] =   A[1][1] / det;
        A_inv[0][1] = - A[0][1] / det;
        A_inv[1][0] = - A[1][0] / det;
        A_inv[1][1] =   A[0][0] / det;
        return;
    }
    if(size>2)
    {
        fprintf(stderr, "Error in invertDoubleMatrix: only works for sizes 1 to 2\n");
        exit(-1);
    }
}

/* out = ||arr||^2 (sum of squares) */
float computeNormSquaredFloatArray(float *arr, long int len)
{
    long int i;
    float out = 0;
    for (i = 0; i < len; ++i)
    {
        out += (arr[i]*arr[i]);
    }
    return out;
}

/**
 * out = sqrt(numerator/denominator)
 *
 * numerator   = ||x1-x2||^2
 * denominator = ||max(x1,x2)||^2   (elementwise max)
 */
float computeRelativeRMSEFloatArray(float *arr1, float *arr2, long int len)
{
    long int i;
    float numerator = 0, denominator = 0, m;
    for (i = 0; i < len; ++i)
    {
        numerator += (arr1[i]-arr2[i])*(arr1[i]-arr2[i]);
        m = _MAX_(arr1[i], arr2[i]);
        denominator += m*m;
    }
    return sqrt(numerator/denominator);
}

/**
 *               1   ||     ||2
 * normError =  ---  || arr ||
 *               M   ||     ||W
 *
 * i.e. the weighted squared norm of arr averaged over all sinogram entries,
 * with W = sino->wgt.
 */
float computeSinogramWeightedNormSquared(struct Sino *sino, float *arr)
{
    long int i_beta, i_v, i_w;
    long int num_mask;
    float normError = 0;

    for (i_beta = 0; i_beta < sino->params.N_beta; ++i_beta)
    for (i_v = 0; i_v < sino->params.N_dv; ++i_v)
    for (i_w = 0; i_w < sino->params.N_dw; ++i_w)
    {
        normError += arr[index_3D(i_beta,i_v,i_w,sino->params.N_dv,sino->params.N_dw)]
                   * sino->wgt[index_3D(i_beta,i_v,i_w,sino->params.N_dv,sino->params.N_dw)]
                   * arr[index_3D(i_beta,i_v,i_w,sino->params.N_dv,sino->params.N_dw)];
    }
    num_mask = sino->params.N_beta * sino->params.N_dv * sino->params.N_dw;
    normError /= num_mask;

    return normError;
}

/**
 * returns 1 iff pixel (i_1, i_2) is inside the ellipse that fits in the
 * N1 x N2 rectangle.
 */
char isInsideMask(long int i_1, long int i_2, long int N1, long int N2)
{
    float center_1, center_2;
    float radius_1, radius_2;
    float reldistance;

    center_1 = (N1-1.0)/2.0;
    center_2 = (N2-1.0)/2.0;

    radius_1 = N1/2.0;
    radius_2 = N2/2.0;

    reldistance = pow((i_1-center_1)/radius_1, 2) + pow((i_2-center_2)/radius_2, 2);
    return (reldistance<1 ? 1 : 0);
}

/* Count of voxels inside the elliptical mask, times the number of slices. */
long int computeNumVoxelsInImageMask(struct Image *img)
{
    long int j_x, j_y;
    long int count = 0;

    for (j_x = 0; j_x < img->params.N_x; ++j_x)
    for (j_y = 0; j_y < img->params.N_y; ++j_y)
    {
        count = count + 1*isInsideMask(j_x, j_y, img->params.N_x, img->params.N_y);
    }
    count = count*img->params.N_z;

    return count;
}

/* Copy the ROI sub-volume out of the full image, zeroing voxels outside the
 * elliptical mask of the ROI cross-section. */
void copyImage2ROI(struct Image *img)
{
    long int j_x, j_y, j_z;
    long int j_xstart, j_xstop, j_ystart, j_ystop, j_zstart, j_zstop;
    long int N_x_roi, N_y_roi;

    j_xstart = img->params.j_xstart_roi;
    j_xstop = img->params.j_xstop_roi;
    j_ystart = img->params.j_ystart_roi;
    j_ystop = img->params.j_ystop_roi;
    j_zstart = img->params.j_zstart_roi;
    j_zstop = img->params.j_zstop_roi;

    N_x_roi = img->params.j_xstop_roi - img->params.j_xstart_roi + 1;
    N_y_roi = img->params.j_ystop_roi - img->params.j_ystart_roi + 1;

    for (j_x = j_xstart; j_x <= j_xstop; ++j_x)
    for (j_y = j_ystart; j_y <= j_ystop; ++j_y)
    for (j_z = j_zstart; j_z <= j_zstop; ++j_z)
    {
        img->vox_roi[j_x-j_xstart][j_y-j_ystart][j_z-j_zstart] =
            img->vox[index_3D(j_x,j_y,j_z,img->params.N_y, img->params.N_z)]
            * isInsideMask(j_x-img->params.j_xstart_roi, j_y-img->params.j_ystart_roi, N_x_roi, N_y_roi);
    }
}

/* Zero all entries of a 3-D (pointer-of-pointers) array that lie outside
 * the elliptical mask in the first two dimensions. */
void applyMask3D(float ***arr, long int N1, long int N2, long int N3)
{
    long int i1, i2, i3;
    int b;

    for (i1 = 0; i1 < N1; ++i1)
    for (i2 = 0; i2 < N2; ++i2)
    {
        b = isInsideMask(i1, i2, N1, N2);
        for (i3 = 0; i3 < N3; ++i3)
        {
            arr[i1][i2][i3] *= b;
        }
    }
}

/* Same as applyMask3D for a flat (N1, N2, N3) array. */
void applyMask(float *arr, long int N1, long int N2, long int N3)
{
    long int i1, i2, i3;
    int b;

    for (i1 = 0; i1 < N1; ++i1)
    for (i2 = 0; i2 < N2; ++i2)
    {
        b = isInsideMask(i1, i2, N1, N2);
        for (i3 = 0; i3 < N3; ++i3)
        {
            arr[index_3D(i1,i2,i3,N2,N3)] *= b;
        }
    }
}

/* Z = a*X + b*Y, elementwise. */
void floatArray_z_equals_aX_plus_bY(float *Z, float a, float *X, float b, float *Y, long int len)
{
    long int i;
    for (i = 0; i < len; ++i)
    {
        Z[i] = a*X[i] + b*Y[i];
    }
}

/* Fill a float array with a constant value. */
void setFloatArray2Value(float *arr, long int len, float value)
{
    long int i;
    for (i = 0; i < len; ++i)
    {
        arr[i] = value;
    }
}

/* Fill an unsigned char array with a constant value. */
void setUCharArray2Value(unsigned char *arr, long int len, unsigned char value)
{
    long int i;
    for (i = 0; i < len; ++i)
    {
        arr[i] = value;
    }
}

/* Allocate a flat (N_beta, N_dv, N_dw) sinogram buffer. */
void* allocateSinoData3DCone(struct SinoParams *params, int dataTypeSize)
{
    return mget_spc(params->N_beta*params->N_dv*params->N_dw, dataTypeSize);
}

/* Allocate a 3-D image volume; either the full (N_x, N_y, N_z) grid or,
 * if isROI, the ROI sub-volume. */
void*** allocateImageData3DCone( struct ImageParams *params, int dataTypeSize, int isROI)
{
    long int N_x_roi, N_y_roi, N_z_roi;

    N_x_roi = params->j_xstop_roi - params->j_xstart_roi + 1;
    N_y_roi = params->j_ystop_roi - params->j_ystart_roi + 1;
    N_z_roi = params->j_zstop_roi - params->j_zstart_roi + 1;

    if (isROI)
    {
        return multialloc(dataTypeSize, 3, N_x_roi, N_y_roi, N_z_roi);
    }
    else
    {
        return multialloc(dataTypeSize, 3, params->N_x, params->N_y, params->N_z);
    }
}

void freeViewAngleList(struct ViewAngleList *list)
{
    free((void*)list->beta);
}


/****************************************  stuff for random update ****************************************/
void RandomZiplineAux_allocate(struct RandomZiplineAux *aux, struct ImageParams *imgParams, struct ReconParams *reconParams)
{
    long int N_x, N_y, N_z;

    N_x = imgParams->N_x;
    N_y = imgParams->N_y;
    N_z = imgParams->N_z;

    /**
     *      Initialize orderXY
     */
    aux->orderXY = mget_spc(N_x * N_y, sizeof(int));

    /**
     *      Initialize groupIndex
     */
    /* NOTE(review): element size sizeof(unsigned char***) looks like it should
     * be sizeof(unsigned char); as written it only over-allocates, so it is
     * left unchanged here — confirm against multialloc's contract. */
    aux->groupIndex = (unsigned char***) multialloc(sizeof(unsigned char***), 3, N_x, N_y, N_z);
}

void RandomZiplineAux_Initialize(struct RandomZiplineAux *aux, struct ImageParams *imgParams, struct ReconParams *reconParams, int N_M_max)
{
    long int N_x, N_y;
    long int j_xy;

    /**
     *      Initialize N_G
     */
    aux->N_G = reconParams->N_G;
    aux->N_M_max = N_M_max;

    N_x = imgParams->N_x;
    N_y = imgParams->N_y;

    /**
     *      Initialize orderXY with the identity permutation 0..N_x*N_y-1
     */
    for (j_xy = 0; j_xy < N_x*N_y; ++j_xy)
    {
        aux->orderXY[j_xy] = j_xy;
    }
}

void RandomAux_allocate(struct RandomAux *aux, struct ImageParams *imgParams)
{
    aux->orderXYZ = mget_spc(imgParams->N_x * imgParams->N_y * imgParams->N_z, sizeof(long ));
}

void RandomAux_Initialize(struct RandomAux *aux, struct ImageParams *imgParams)
{
    long int N_x, N_y, N_z;
    long int j_xyz;

    N_x = imgParams->N_x;
    N_y = imgParams->N_y;
    N_z = imgParams->N_z;

    /**
     *      Initialize orderXYZ with the identity permutation
     */
    for (j_xyz = 0; j_xyz < N_x*N_y*N_z; ++j_xyz)
    {
        aux->orderXYZ[j_xyz] = j_xyz;
    }
}

void RandomZiplineAux_free(struct RandomZiplineAux *aux)
{
    free((void*)aux->orderXY);
    multifree((void***)aux->groupIndex, 3);
}

void RandomAux_free(struct RandomAux *aux)
{
    free((void*)aux->orderXYZ);
}

/* Assign each voxel along z a group index in [0, N_G), such that
 * consecutive voxels along z always get different groups. */
void RandomZiplineAux_ShuffleGroupIndices(struct RandomZiplineAux *aux, struct ImageParams *imgParams)
{
    long int j_x, j_y, j_z, N_G, r;

    N_G = aux->N_G;
    /* srand(time(NULL)); */
    for(j_x = 0; j_x < imgParams->N_x; j_x++)
    {
        for (j_y = 0; j_y < imgParams->N_y; ++j_y)
        {
            /* random[1,N_G-1]*/
            aux->groupIndex[j_x][j_y][0] = rand() % N_G;
            for (j_z = 1; j_z < imgParams->N_z; ++j_z)
            {
                /* r \in [1, ..., N_G-1] */
                r = 1 + (rand() % (N_G-1));
                /* next index is any of the other N_G-1 indices (uniformly random) */
                aux->groupIndex[j_x][j_y][j_z] = (aux->groupIndex[j_x][j_y][j_z-1] + r) % N_G;
            }
        }
    }
}

/* Like the above but every group appears exactly once per N_G consecutive
 * voxels: a random permutation of 0..N_G-1 tiled along z. */
void RandomZiplineAux_ShuffleGroupIndices_FixedDistance(struct RandomZiplineAux *aux, struct ImageParams *imgParams)
{
    long int j_x, j_y, j_z, N_G, i;
    int *first_N_G_members;

    N_G = aux->N_G;
    /* srand(time(NULL)); */

    first_N_G_members = mget_spc(N_G, sizeof(int));

    /* Initialize first_N_G_members with 0, 1, ..., N_G-1 */
    for (i = 0; i < N_G; ++i)
    {
        first_N_G_members[i] = i;
    }

    for(j_x = 0; j_x < imgParams->N_x; j_x++)
    {
        for (j_y = 0; j_y < imgParams->N_y; ++j_y)
        {
            shuffleIntArray(first_N_G_members, N_G);
            for (j_z = 0; j_z < imgParams->N_z; ++j_z)
            {
                /* output array has the first N_G members repeated */
                aux->groupIndex[j_x][j_y][j_z] = first_N_G_members[j_z % N_G];
            }
        }
    }
}

void RandomZiplineAux_shuffleOrderXY(struct RandomZiplineAux *aux, struct ImageParams *imgParams)
{
    shuffleIntArray(aux->orderXY, imgParams->N_x*imgParams->N_y);
}

/* Split a flat xy index: j_xy = j_y + N_y*j_x */
void indexExtraction2D(long int j_xy, long int *j_x, long int N_x, long int *j_y, long int N_y)
{
    *j_y = j_xy % N_y;
    *j_x = (j_xy - *j_y) / N_y;

    return;
}

/* Fisher-Yates shuffle of an int array (uses rand()). */
void shuffleIntArray(int *arr, long int len)
{
    int target_idx, candidate_idx, target, candidate;

    /*srand(time(NULL));*/
    for (target_idx = 0; target_idx < len-1; target_idx++)
    {
        candidate_idx = target_idx + (rand() % (len-target_idx));

        /* Swap target and candidate */
        candidate = arr[candidate_idx];
        target = arr[target_idx];
        arr[candidate_idx] = target;
        arr[target_idx] = candidate;
    }
}

/* Fisher-Yates shuffle of a long int array (uses rand()). */
void shuffleLongIntArray(long int *arr, long int len)
{
    long int target_idx, candidate_idx, target, candidate;

    /*srand(time(NULL));*/
    for (target_idx = 0; target_idx < len-1; target_idx++)
    {
        candidate_idx = target_idx + (rand() % (len-target_idx));

        /* Swap target and candidate */
        candidate = arr[candidate_idx];
        target = arr[target_idx];
        arr[candidate_idx] = target;
        arr[target_idx] = candidate;
    }
}

/**
 *              { 1     with probability p
 *  bernoulli = {
 *              { 0     with probability 1-p
 *
 *  bernoulli(P/100)==1 is true with probability P[%]
 */
int bernoulli(float p)
{
    float r;
    if(p==0)
        return 0;
    if(p==1)
        return 1;

    r = ((float) rand() / (RAND_MAX));
    if(r<p)
        return 1;
    else
        return 0;
}

long int uniformIntegerRV(long int l, long int h)
{
return l+rand()%(h-l+1); }

/* creates random integer, Z, variable that is approx uniform in [mean-sigma, mean+sigma] */
/* "mean" corresponds to the real expectation of Z */
/* range(Z) = 2*sigma + 1 - delta(mean-ceil(mean)) */
long int almostUniformIntegerRV(float mean, int sigma)
{
    float mean_low, mean_high;
    float X_low, X_high;
    int b;

    mean_low = floor(mean);
    mean_high = ceil(mean);

    /* candidate draws centered on the two integers bracketing `mean` */
    X_low = uniformIntegerRV(mean_low-sigma, mean_low+sigma);
    X_high = uniformIntegerRV(mean_high-sigma, mean_high+sigma);

    /* choose the low candidate with probability (mean_high - mean) so the
       expectation equals `mean`; when mean is an integer, mean_high == mean,
       b = 0, and the high candidate is always used */
    b = bernoulli(mean_high-mean);
    return b*X_low + (1-b)*X_high;
}

/**************************************** tic toc ****************************************/

/* Start a wall-clock stopwatch: store the negated current time so a later
   toc() turns it into elapsed seconds. */
void tic(float *ticToc)
{
    (*ticToc) = -omp_get_wtime();
}

/* Stop the stopwatch: *ticToc becomes (now - start), i.e. elapsed seconds. */
void toc(float *ticToc)
{
    (*ticToc) += omp_get_wtime();
}

/* Print a labeled elapsed time. */
void ticTocDisp(float ticToc, char *ticTocName)
{
    printf("[ticToc] %s = %e s\n", ticTocName, ticToc);
}

/**************************************** timer ****************************************/

/* Restart a repeating timer: store the negated current time (same convention
   as tic()). */
void timer_reset(float *timer)
{
    (*timer) = -omp_get_wtime();
}

/* Return 1 (and restart the timer) once more than `time_passed` seconds have
   elapsed since the last reset; otherwise return 0. */
int timer_hasPassed(float *timer, float time_passed)
{
    float time_now;

    time_now = omp_get_wtime();
    /* (*timer) holds -start, so (*timer) + time_now is the elapsed time */
    if ((*timer) + time_now > time_passed )
    {
        (*timer) = -time_now; /* reset timer */
        return 1;
    }
    else
    {
        return 0;
    }
}

/**************************************** misc ****************************************/

/* Standard partition step of QuickSort (Lomuto scheme).
   NOTE: the pivot is the LAST element, arr[right] (the old comment claimed
   the mid element, which did not match the code). Moves all elements
   <= pivot to its left and greater elements to its right; returns the
   pivot's final position. */
long int partition(float arr[], long int left, long int right)
{
    float pivot, temp = 0;
    long int i,j ;

    pivot = arr[right];
    /*printf("(left, right) = (%d, %d)\n", left, right);*/

    i = left; /* everything < i is <= pivot*/
    for (j = left; j <= right - 1; j++)
    {
        if (arr[j] <= pivot)
        {
            _SWAP_(arr[i], arr[j], temp);
            i++;
        }
    }
    _SWAP_(arr[i], arr[right], temp);
    return i; /* end pivot position */
}

/* This function returns k'th smallest element in arr[l..r] using QuickSort based method. 
WARNING: it will also mix the array around */ float kthSmallest(float arr[], long int l, long int r, long int k) { long int pivotIndex; /* If k is smaller than number of elements in array */ if (k > 0 && k <= r - l + 1) { /* Partition the array around last element and get position of pivot element in sorted array */ pivotIndex = partition(arr, l, r); if (pivotIndex - l == k - 1) return arr[pivotIndex]; if (pivotIndex - l > k - 1) { return kthSmallest(arr, l, pivotIndex - 1, k); } else { return kthSmallest(arr, pivotIndex + 1, r, k - pivotIndex + l - 1); } } else { printf("ERROR in kthSmallest: k = %ld not in [0,...,r - l + 1]=[0,...,%ld]\n", k, r-l+1); exit(1); } } /* Returns p-th percentile p \in 0 to 100 WARNING: it will also mix the array around */ float prctile(float arr[], long int len, float p) { long int k; k = p*(len-1)/100; return kthSmallest(arr, 0, len-1, k); } /* Returns approximately p-th percentile p \in 0 to 100 */ /* Uses arr(0:subsampleFacor:end-1) */ /* will leave original array unchanged */ float prctile_copyFast(float arr[], long int len, float p, int subsampleFactor) { long int i,len_sub; float *arr_sub; float result; len_sub = len/subsampleFactor; arr_sub = mget_spc(len_sub, sizeof(float)); for (i = 0; i < len_sub; ++i) { arr_sub[i] = arr[i*subsampleFactor]; } result = prctile(arr_sub, len_sub, p); free(arr_sub); return result; } /* IO routines */ long int keepWritingToBinaryFile(FILE *fp, void *var, long int numEls, int elSize, char *fName) { /* Return number of bytes written */ long int numElsWritten; numElsWritten = fwrite(var, elSize, numEls, fp); if(numElsWritten != numEls) { fprintf(stderr, "ERROR in keepWritingToBinaryFile: file \"%s\" terminated early.\n", fName); fprintf(stderr, "Tried to write %li elements of size %d Bytes. 
Wrote %li elements.\n", numEls, elSize, numElsWritten); fclose(fp); exit(-1); } return (long int) numEls * elSize; } long int keepReadingFromBinaryFile(FILE *fp, void *var, long int numEls, int elSize, char *fName) { /* Return number of bytes read */ long int numElsRead; numElsRead = fread(var, elSize, numEls, fp); if(numElsRead != numEls) { fprintf(stderr, "ERROR in keepReadingFromBinaryFile: file \"%s\" terminated early.\n", fName); fprintf(stderr, "Tried to read %li elements of size %d Bytes. Read %li elements.\n", numEls, elSize, numElsRead); fclose(fp); exit(-1); } return (long int) numEls * elSize; } void printFileIOInfo( char* functionName, char* fName, long int size, char mode) { char readwrite[200]; /* puts the word "Read" or "Write" into the output */ switch(mode) { case 'r': strcpy(readwrite, "Read "); break; case 'w': strcpy(readwrite, "Write"); break; default: printf("Error in printFileIOInfo: Use mode 'r' or 'w'\n"); exit(-1); } printf("\n"); printf(" ************** FILE ACCESS ********************************\n"); printf(" **** File access in: %s\n", functionName); printf("***** File name : %s\n", fName); printf("***** %-14s: %-15ld bytes\n", readwrite, size); printf("***** = %-15e kB\n", (float) size*1e-3); printf(" **** = %-15e MB\n", (float) size*1e-6); printf(" ***********************************************************\n"); } void printProgressOfLoop( long int indexOfLoop, long int NumIterations) { float percent; percent = (float) (1+indexOfLoop) / (float) NumIterations * 100; printf("\r[%.1e%%]", percent ); fflush(stdout); } void logAndDisp_message(char *fName, char* message) { log_message(fName, message); printf("%s", message); } void log_message(char *fName, char* message) { FILE *fp; fp = fopen(fName, "a"); if (fp != NULL) { fprintf(fp, "%s", message); fclose(fp); } else { fprintf(stderr, "WARNING: In log_message: Could not open file %s\n", fName); } } void resetFile(char *fName) { FILE *filePointer; filePointer = fopen(fName, "w"); 
fclose(filePointer); } void copySinoParams(struct SinoParams *params_src, struct SinoParams *params_dest) { params_dest->N_dv = params_src->N_dv; params_dest->N_dw = params_src->N_dw; params_dest->Delta_dv = params_src->Delta_dv; params_dest->Delta_dw = params_src->Delta_dw; params_dest->N_beta = params_src->N_beta; params_dest->u_s = params_src->u_s; params_dest->u_r = params_src->u_r; params_dest->v_r = params_src->v_r; params_dest->u_d0 = params_src->u_d0; params_dest->v_d0 = params_src->v_d0; params_dest->w_d0 = params_src->w_d0; params_dest->weightScaler_value = params_src->weightScaler_value; } void copyImgParams(struct ImageParams *params_src, struct ImageParams *params_dest) { params_dest->x_0 = params_src->x_0; params_dest->y_0 = params_src->y_0; params_dest->z_0 = params_src->z_0; params_dest->N_x = params_src->N_x; params_dest->N_y = params_src->N_y; params_dest->N_z = params_src->N_z; params_dest->Delta_xy = params_src->Delta_xy; params_dest->Delta_z = params_src->Delta_z; params_dest->j_xstart_roi = params_src->j_xstart_roi; params_dest->j_ystart_roi = params_src->j_ystart_roi; params_dest->j_zstart_roi = params_src->j_zstart_roi; params_dest->j_xstop_roi = params_src->j_xstop_roi; params_dest->j_ystop_roi = params_src->j_ystop_roi; params_dest->j_zstop_roi = params_src->j_zstop_roi; } void printSinoParams(struct SinoParams *params) { printf("\nSinogram parameters read:\n"); printf("\tN_dv = %ld,\n", params->N_dv); printf("\tN_dw = %ld,\n", params->N_dw); printf("\tDelta_dv = %e,\n", params->Delta_dv); printf("\tDelta_dw = %e,\n", params->Delta_dw); printf("\tN_beta = %ld,\n", params->N_beta); printf("\tu_s = %e,\n", params->u_s); printf("\tu_r = %e,\n", params->u_r); printf("\tv_r = %e,\n", params->v_r); printf("\tu_d0 = %e,\n", params->u_d0); printf("\tv_d0 = %e,\n", params->v_d0); printf("\tw_d0 = %e,\n", params->w_d0); printf("\t(potentially uninitialized:)\n"); printf("\tweightScaler_value = %e,\n", params->weightScaler_value); } void 
printImgParams(struct ImageParams *params)
{
    printf("\nImage parameters read:\n");
    printf("\tx_0 = %e \n", params->x_0);
    printf("\ty_0 = %e \n", params->y_0);
    printf("\tz_0 = %e \n", params->z_0);
    printf("\tN_x = %ld \n", params->N_x);
    printf("\tN_y = %ld \n", params->N_y);
    printf("\tN_z = %ld \n", params->N_z);
    printf("\tDelta_xy = %e \n", params->Delta_xy);
    printf("\tDelta_z = %e \n", params->Delta_z);
    printf("\tj_xstart_roi = %ld \n", params->j_xstart_roi);
    printf("\tj_ystart_roi = %ld \n", params->j_ystart_roi);
    printf("\tj_zstart_roi = %ld \n", params->j_zstart_roi);
    printf("\tj_xstop_roi = %ld \n", params->j_xstop_roi);
    printf("\tj_ystop_roi = %ld \n", params->j_ystop_roi);
    printf("\tj_zstop_roi = %ld \n", params->j_zstop_roi);
}

/* Dump all reconstruction parameters (prior model, stopping criteria,
   zipline, weight-scaler, and NHICD settings) to stdout for
   debugging/inspection. */
void printReconParams(struct ReconParams *params)
{
    printf("\nReconstruction parameters read:\n");
    printf("\tpriorWeight_QGGMRF = %e \n", params->priorWeight_QGGMRF);
    printf("\tpriorWeight_proxMap = %e \n", params->priorWeight_proxMap);
    printf("\tq = %e \n", params->q);
    printf("\tp = %e \n", params->p);
    printf("\tT = %e \n", params->T);
    printf("\tsigmaX = %e \n", params->sigmaX);
    printf("\tbFace = %e \n", params->bFace);
    printf("\tbEdge = %e \n", params->bEdge);
    printf("\tbVertex = %e \n", params->bVertex);
    printf("\tsigma_lambda = %e \n", params->sigma_lambda);
    printf("\tis_positivity_constraint = %d \n", params->is_positivity_constraint);
    printf("\tstopThresholdChange_pct = %e \n", params->stopThresholdChange_pct);
    printf("\tstopThesholdRWFE_pct = %e \n", params->stopThesholdRWFE_pct);
    printf("\tstopThesholdRUFE_pct = %e \n", params->stopThesholdRUFE_pct);
    printf("\tMaxIterations = %d \n", params->MaxIterations);
    printf("\trelativeChangeMode = %s \n", params->relativeChangeMode);
    printf("\trelativeChangeScaler = %e \n", params->relativeChangeScaler);
    printf("\trelativeChangePercentile = %e \n", params->relativeChangePercentile);
    printf("\tN_G = %d \n", params->N_G);
    printf("\tzipLineMode = %d \n", params->zipLineMode);
printf("\tnumVoxelsPerZiplineMax = %d \n", params->numVoxelsPerZiplineMax);
    printf("\tnumVoxelsPerZipline = %d \n", params->numVoxelsPerZipline);
    printf("\tnumZiplines = %d \n", params->numZiplines);
    printf("\tweightScaler_estimateMode = %s \n", params->weightScaler_estimateMode);
    printf("\tweightScaler_domain = %s \n", params->weightScaler_domain);
    printf("\tweightScaler_value = %e \n", params->weightScaler_value);
    printf("\tNHICD_Mode = %s \n", params->NHICD_Mode);
    printf("\tNHICD_ThresholdAllVoxels_ErrorPercent = %e \n", params->NHICD_ThresholdAllVoxels_ErrorPercent);
    printf("\tNHICD_percentage = %e \n", params->NHICD_percentage);
    printf("\tNHICD_random = %e \n", params->NHICD_random);
    printf("\tverbosity = %d \n", params->verbosity);
    printf("\tisComputeCost = %d \n", params->isComputeCost);
}

/* Dump the precomputed system-matrix summary values (strides, detector-grid
   geometry, and B/C coefficient scaling) to stdout for
   debugging/inspection. */
void printSysMatrixParams(struct SysMatrix *A)
{
    printf("\nSystemMatrix parameters:\n");
    printf("\ti_vstride_max = %ld \n", A->i_vstride_max);
    printf("\ti_wstride_max = %ld \n", A->i_wstride_max);
    printf("\tN_u = %ld \n", A->N_u);
    printf("\tDelta_u = %e \n", A->Delta_u);
    printf("\tu_0 = %e \n", A->u_0);
    printf("\tu_1 = %e \n", A->u_1);
    printf("\tB_ij_max = %e \n", A->B_ij_max);
    printf("\tC_ij_max = %e \n", A->C_ij_max);
    printf("\tB_ij_scaler = %e \n", A->B_ij_scaler);
    printf("\tC_ij_scaler = %e \n", A->C_ij_scaler);
}
convolutiondepthwise_3x3_pack4.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convdw3x3s1_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { #if __aarch64__ const int w = bottom_blob.w; #endif const int outw = top_blob.w; const int outh = top_blob.h; const int group = bottom_blob.c; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); float32x4_t _bias0 = bias ? 
vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f); const float* k0 = kernel.row(g); float* outptr0 = out.row(0); const Mat img0 = bottom_blob.channel(g); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); float32x4_t _k00 = vld1q_f32(k0); float32x4_t _k01 = vld1q_f32(k0 + 4); float32x4_t _k02 = vld1q_f32(k0 + 8); float32x4_t _k10 = vld1q_f32(k0 + 12); float32x4_t _k11 = vld1q_f32(k0 + 16); float32x4_t _k12 = vld1q_f32(k0 + 20); float32x4_t _k20 = vld1q_f32(k0 + 24); float32x4_t _k21 = vld1q_f32(k0 + 28); float32x4_t _k22 = vld1q_f32(k0 + 32); int i = 0; #if __aarch64__ float* outptr1 = out.row(1); const float* r3 = img0.row(3); for (; i + 1 < outh; i += 2) { int j = 0; for (; j + 3 < outw; j += 4) { asm volatile( "prfm pldl1keep, [%3, #256] \n" "ld1 {v10.4s, v11.4s}, [%3], #32 \n" // r10 r11 "mov v16.16b, %21.16b \n" // sum00 "mov v17.16b, %21.16b \n" // sum01 "mov v18.16b, %21.16b \n" // sum02 "mov v19.16b, %21.16b \n" // sum03 "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%3] \n" // r12 r13 r14 r15 "mov v20.16b, %21.16b \n" // sum10 "mov v21.16b, %21.16b \n" // sum11 "mov v22.16b, %21.16b \n" // sum12 "mov v23.16b, %21.16b \n" // sum13 "fmla v16.4s, %15.4s, v10.4s \n" "fmla v17.4s, %15.4s, v11.4s \n" "fmla v18.4s, %15.4s, v12.4s \n" "fmla v19.4s, %15.4s, v13.4s \n" "fmla v20.4s, %12.4s, v10.4s \n" "fmla v21.4s, %12.4s, v11.4s \n" "fmla v22.4s, %12.4s, v12.4s \n" "fmla v23.4s, %12.4s, v13.4s \n" "add %3, %3, #32 \n" "fmla v16.4s, %16.4s, v11.4s \n" "fmla v17.4s, %16.4s, v12.4s \n" "fmla v18.4s, %16.4s, v13.4s \n" "fmla v19.4s, %16.4s, v14.4s \n" "fmla v20.4s, %13.4s, v11.4s \n" "fmla v21.4s, %13.4s, v12.4s \n" "fmla v22.4s, %13.4s, v13.4s \n" "fmla v23.4s, %13.4s, v14.4s \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v10.4s, v11.4s}, [%4], #32 \n" // r20 r21 "fmla v16.4s, %17.4s, v12.4s \n" "fmla v17.4s, %17.4s, v13.4s \n" "fmla v18.4s, %17.4s, v14.4s \n" "fmla v19.4s, %17.4s, v15.4s \n" "fmla 
v20.4s, %14.4s, v12.4s \n" "fmla v21.4s, %14.4s, v13.4s \n" "fmla v22.4s, %14.4s, v14.4s \n" "fmla v23.4s, %14.4s, v15.4s \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4] \n" // r22 r23 r24 r25 "fmla v16.4s, %18.4s, v10.4s \n" "fmla v17.4s, %18.4s, v11.4s \n" "fmla v18.4s, %18.4s, v12.4s \n" "fmla v19.4s, %18.4s, v13.4s \n" "fmla v20.4s, %15.4s, v10.4s \n" "fmla v21.4s, %15.4s, v11.4s \n" "fmla v22.4s, %15.4s, v12.4s \n" "fmla v23.4s, %15.4s, v13.4s \n" "add %4, %4, #32 \n" "fmla v16.4s, %19.4s, v11.4s \n" "fmla v17.4s, %19.4s, v12.4s \n" "fmla v18.4s, %19.4s, v13.4s \n" "fmla v19.4s, %19.4s, v14.4s \n" "fmla v20.4s, %16.4s, v11.4s \n" "fmla v21.4s, %16.4s, v12.4s \n" "fmla v22.4s, %16.4s, v13.4s \n" "fmla v23.4s, %16.4s, v14.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v10.4s, v11.4s}, [%2], #32 \n" // r00 r01 "prfm pldl1keep, [%5, #256] \n" "ld1 {v24.4s, v25.4s}, [%5], #32 \n" // r30 r31 "fmla v16.4s, %20.4s, v12.4s \n" "fmla v17.4s, %20.4s, v13.4s \n" "fmla v18.4s, %20.4s, v14.4s \n" "fmla v19.4s, %20.4s, v15.4s \n" "fmla v20.4s, %17.4s, v12.4s \n" "fmla v21.4s, %17.4s, v13.4s \n" "fmla v22.4s, %17.4s, v14.4s \n" "fmla v23.4s, %17.4s, v15.4s \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%2] \n" // r02 r03 r04 r05 "prfm pldl1keep, [%5, #512] \n" "ld1 {v26.4s, v27.4s, v28.4s, v29.4s}, [%5] \n" // r32 r33 r34 r35 "fmla v16.4s, %12.4s, v10.4s \n" "fmla v17.4s, %12.4s, v11.4s \n" "fmla v18.4s, %12.4s, v12.4s \n" "fmla v19.4s, %12.4s, v13.4s \n" "fmla v20.4s, %18.4s, v24.4s \n" "fmla v21.4s, %18.4s, v25.4s \n" "fmla v22.4s, %18.4s, v26.4s \n" "fmla v23.4s, %18.4s, v27.4s \n" "add %2, %2, #32 \n" "fmla v16.4s, %13.4s, v11.4s \n" "fmla v17.4s, %13.4s, v12.4s \n" "fmla v18.4s, %13.4s, v13.4s \n" "fmla v19.4s, %13.4s, v14.4s \n" "fmla v20.4s, %19.4s, v25.4s \n" "fmla v21.4s, %19.4s, v26.4s \n" "fmla v22.4s, %19.4s, v27.4s \n" "fmla v23.4s, %19.4s, v28.4s \n" "add %5, %5, #32 \n" "fmla v16.4s, %14.4s, v12.4s \n" 
"fmla v17.4s, %14.4s, v13.4s \n" "fmla v18.4s, %14.4s, v14.4s \n" "fmla v19.4s, %14.4s, v15.4s \n" "fmla v20.4s, %20.4s, v26.4s \n" "fmla v21.4s, %20.4s, v27.4s \n" "fmla v22.4s, %20.4s, v28.4s \n" "fmla v23.4s, %20.4s, v29.4s \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%0], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3) // %5 : "0"(outptr0), "1"(outptr1), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k00), // %12 "w"(_k01), // %13 "w"(_k02), // %14 "w"(_k10), // %15 "w"(_k11), // %16 "w"(_k12), // %17 "w"(_k20), // %18 "w"(_k21), // %19 "w"(_k22), // %20 "w"(_bias0) // %21 : "memory", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29"); } for (; j + 1 < outw; j += 2) { asm volatile( "prfm pldl1keep, [%3, #512] \n" "ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%3] \n" // r10 r11 r12 r13 "mov v16.16b, %21.16b \n" // sum00 "mov v17.16b, %21.16b \n" // sum01 "mov v18.16b, %21.16b \n" // sum10 "mov v19.16b, %21.16b \n" // sum11 "fmla v16.4s, %15.4s, v10.4s \n" "fmla v17.4s, %15.4s, v11.4s \n" "fmla v18.4s, %12.4s, v10.4s \n" "fmla v19.4s, %12.4s, v11.4s \n" "add %3, %3, #32 \n" "fmla v16.4s, %16.4s, v11.4s \n" "fmla v17.4s, %16.4s, v12.4s \n" "fmla v18.4s, %13.4s, v11.4s \n" "fmla v19.4s, %13.4s, v12.4s \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%4] \n" // r20 r21 r22 r23 "fmla v16.4s, %17.4s, v12.4s \n" "fmla v17.4s, %17.4s, v13.4s \n" "fmla v18.4s, %14.4s, v12.4s \n" "fmla v19.4s, %14.4s, v13.4s \n" "add %4, %4, #32 \n" "fmla v16.4s, %18.4s, v20.4s \n" "fmla v17.4s, %18.4s, v21.4s \n" "fmla v18.4s, %15.4s, v20.4s \n" "fmla v19.4s, %15.4s, v21.4s \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%2] \n" // r00 r01 r02 r03 "fmla v16.4s, %19.4s, v21.4s \n" "fmla v17.4s, %19.4s, v22.4s \n" "fmla v18.4s, %16.4s, v21.4s \n" 
"fmla v19.4s, %16.4s, v22.4s \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%5] \n" // r30 r31 r32 r33 "fmla v16.4s, %20.4s, v22.4s \n" "fmla v17.4s, %20.4s, v23.4s \n" "fmla v18.4s, %17.4s, v22.4s \n" "fmla v19.4s, %17.4s, v23.4s \n" "add %2, %2, #32 \n" "fmla v16.4s, %12.4s, v10.4s \n" "fmla v17.4s, %12.4s, v11.4s \n" "fmla v18.4s, %18.4s, v24.4s \n" "fmla v19.4s, %18.4s, v25.4s \n" "add %5, %5, #32 \n" "fmla v16.4s, %13.4s, v11.4s \n" "fmla v17.4s, %13.4s, v12.4s \n" "fmla v18.4s, %19.4s, v25.4s \n" "fmla v19.4s, %19.4s, v26.4s \n" "fmla v16.4s, %14.4s, v12.4s \n" "fmla v17.4s, %14.4s, v13.4s \n" "fmla v18.4s, %20.4s, v26.4s \n" "fmla v19.4s, %20.4s, v27.4s \n" "st1 {v16.4s, v17.4s}, [%0], #32 \n" "st1 {v18.4s, v19.4s}, [%1], #32 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3) // %5 : "0"(outptr0), "1"(outptr1), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k00), // %12 "w"(_k01), // %13 "w"(_k02), // %14 "w"(_k10), // %15 "w"(_k11), // %16 "w"(_k12), // %17 "w"(_k20), // %18 "w"(_k21), // %19 "w"(_k22), // %20 "w"(_bias0) // %21 : "memory", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } for (; j < outw; j++) { asm volatile( "prfm pldl1keep, [%3, #384] \n" "ld1 {v10.4s, v11.4s, v12.4s}, [%3] \n" // r10 r11 r12 "mov v16.16b, %21.16b \n" // sum0 "mov v17.16b, %21.16b \n" // sum1 "fmla v16.4s, %15.4s, v10.4s \n" "fmla v17.4s, %12.4s, v10.4s \n" "add %3, %3, #16 \n" "fmla v16.4s, %16.4s, v11.4s \n" "fmla v17.4s, %13.4s, v11.4s \n" "prfm pldl1keep, [%4, #384] \n" "ld1 {v20.4s, v21.4s, v22.4s}, [%4] \n" // r20 r21 r22 "fmla v16.4s, %17.4s, v12.4s \n" "fmla v17.4s, %14.4s, v12.4s \n" "add %4, %4, #16 \n" "fmla v16.4s, %18.4s, v20.4s \n" "fmla v17.4s, %15.4s, v20.4s \n" "prfm pldl1keep, [%2, #384] \n" "ld1 {v10.4s, v11.4s, v12.4s}, [%2] \n" // r00 r01 r02 "fmla v16.4s, %19.4s, v21.4s \n" "fmla v17.4s, %16.4s, v21.4s \n" 
"prfm pldl1keep, [%5, #384] \n" "ld1 {v24.4s, v25.4s, v26.4s}, [%5] \n" // r30 r31 r32 "fmla v16.4s, %20.4s, v22.4s \n" "fmla v17.4s, %17.4s, v22.4s \n" "add %2, %2, #16 \n" "fmla v16.4s, %12.4s, v10.4s \n" "fmla v17.4s, %18.4s, v24.4s \n" "add %5, %5, #16 \n" "fmla v16.4s, %13.4s, v11.4s \n" "fmla v17.4s, %19.4s, v25.4s \n" "fmla v16.4s, %14.4s, v12.4s \n" "fmla v17.4s, %20.4s, v26.4s \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3) // %5 : "0"(outptr0), "1"(outptr1), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "w"(_k00), // %12 "w"(_k01), // %13 "w"(_k02), // %14 "w"(_k10), // %15 "w"(_k11), // %16 "w"(_k12), // %17 "w"(_k20), // %18 "w"(_k21), // %19 "w"(_k22), // %20 "w"(_bias0) // %21 : "memory", "v10", "v11", "v12", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v24", "v25", "v26"); } r0 += 2 * 4 + w * 4; r1 += 2 * 4 + w * 4; r2 += 2 * 4 + w * 4; r3 += 2 * 4 + w * 4; outptr0 += outw * 4; outptr1 += outw * 4; } #endif // __aarch64__ for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #256] \n" "ld1 {v10.4s, v11.4s}, [%1], #32 \n" // r00 r01 "mov v16.16b, %17.16b \n" // sum00 "mov v17.16b, %17.16b \n" // sum01 "mov v18.16b, %17.16b \n" // sum02 "mov v19.16b, %17.16b \n" // sum03 "prfm pldl1keep, [%1, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1] \n" // r02 r03 r04 r05 "fmla v16.4s, %8.4s, v10.4s \n" "fmla v17.4s, %8.4s, v11.4s \n" "fmla v18.4s, %8.4s, v12.4s \n" "fmla v19.4s, %8.4s, v13.4s \n" "add %1, %1, #32 \n" "fmla v16.4s, %9.4s, v11.4s \n" "fmla v17.4s, %9.4s, v12.4s \n" "fmla v18.4s, %9.4s, v13.4s \n" "fmla v19.4s, %9.4s, v14.4s \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v10.4s, v11.4s}, [%2], #32 \n" // r10 r11 "fmla v16.4s, %10.4s, v12.4s \n" "fmla v17.4s, %10.4s, v13.4s \n" "fmla v18.4s, %10.4s, v14.4s \n" "fmla v19.4s, %10.4s, v15.4s \n" "prfm pldl1keep, [%2, #512] \n" 
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%2] \n" // r12 r13 r14 r15 "fmla v16.4s, %11.4s, v10.4s \n" "fmla v17.4s, %11.4s, v11.4s \n" "fmla v18.4s, %11.4s, v12.4s \n" "fmla v19.4s, %11.4s, v13.4s \n" "add %2, %2, #32 \n" "fmla v16.4s, %12.4s, v11.4s \n" "fmla v17.4s, %12.4s, v12.4s \n" "fmla v18.4s, %12.4s, v13.4s \n" "fmla v19.4s, %12.4s, v14.4s \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v10.4s, v11.4s}, [%3], #32 \n" // r20 r21 "fmla v16.4s, %13.4s, v12.4s \n" "fmla v17.4s, %13.4s, v13.4s \n" "fmla v18.4s, %13.4s, v14.4s \n" "fmla v19.4s, %13.4s, v15.4s \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%3] \n" // r22 r23 r24 r25 "fmla v16.4s, %14.4s, v10.4s \n" "fmla v17.4s, %14.4s, v11.4s \n" "fmla v18.4s, %14.4s, v12.4s \n" "fmla v19.4s, %14.4s, v13.4s \n" "add %3, %3, #32 \n" "fmla v16.4s, %15.4s, v11.4s \n" "fmla v17.4s, %15.4s, v12.4s \n" "fmla v18.4s, %15.4s, v13.4s \n" "fmla v19.4s, %15.4s, v14.4s \n" "fmla v16.4s, %16.4s, v12.4s \n" "fmla v17.4s, %16.4s, v13.4s \n" "fmla v18.4s, %16.4s, v14.4s \n" "fmla v19.4s, %16.4s, v15.4s \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22), // %16 "w"(_bias0) // %17 : "memory", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); #else asm volatile( "pld [%1, #256] \n" "vld1.f32 {d28-d31}, [%1 :128]! \n" // r00 r01 "vmov q10, %q17 \n" // sum00 "vmov q11, %q17 \n" // sum01 "vmla.f32 q10, %q8, q14 \n" "vmla.f32 q11, %q8, q15 \n" "vmla.f32 q10, %q9, q15 \n" "pld [%1, #256] \n" "vld1.f32 {d28-d31}, [%1 :128]! 
\n" // r02 r03 "vmov q12, %q17 \n" // sum02 "vmov q13, %q17 \n" // sum03 "vmla.f32 q12, %q8, q14 \n" "vmla.f32 q11, %q9, q14 \n" "vmla.f32 q13, %q8, q15 \n" "vmla.f32 q10, %q10, q14 \n" "vmla.f32 q12, %q9, q15 \n" "vmla.f32 q11, %q10, q15 \n" // "pld [%1, #256] \n" "vld1.f32 {d28-d31}, [%1 :128] \n" // r04 r05 "vmla.f32 q13, %q9, q14 \n" "vmla.f32 q12, %q10, q14 \n" "vmla.f32 q13, %q10, q15 \n" "pld [%2, #256] \n" "vld1.f32 {d28-d31}, [%2 :128]! \n" // r10 r11 "vmla.f32 q10, %q11, q14 \n" "vmla.f32 q11, %q11, q15 \n" "vmla.f32 q10, %q12, q15 \n" "pld [%2, #256] \n" "vld1.f32 {d28-d31}, [%2 :128]! \n" // r12 r13 "vmla.f32 q12, %q11, q14 \n" "vmla.f32 q11, %q12, q14 \n" "vmla.f32 q13, %q11, q15 \n" "vmla.f32 q10, %q13, q14 \n" "vmla.f32 q12, %q12, q15 \n" "vmla.f32 q11, %q13, q15 \n" // "pld [%2, #256] \n" "vld1.f32 {d28-d31}, [%2 :128] \n" // r14 r15 "vmla.f32 q13, %q12, q14 \n" "vmla.f32 q12, %q13, q14 \n" "vmla.f32 q13, %q13, q15 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n" // r20 r21 "vmla.f32 q10, %q14, q14 \n" "vmla.f32 q11, %q14, q15 \n" "vmla.f32 q10, %q15, q15 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! 
\n" // r22 r23 "vmla.f32 q12, %q14, q14 \n" "vmla.f32 q11, %q15, q14 \n" "vmla.f32 q13, %q14, q15 \n" "vmla.f32 q10, %q16, q14 \n" "vmla.f32 q12, %q15, q15 \n" "vmla.f32 q11, %q16, q15 \n" // "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128] \n" // r24 r25 "vmla.f32 q13, %q15, q14 \n" "vmla.f32 q12, %q16, q14 \n" "vmla.f32 q13, %q16, q15 \n" "vstm %0!, {d20-d27} \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22), // %16 "w"(_bias0) // %17 : "memory", "q10", "q11", "q12", "q13", "q14", "q15"); #endif } for (; j + 1 < outw; j += 2) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1] \n" // r00 r01 r02 r03 "mov v16.16b, %17.16b \n" // sum00 "mov v17.16b, %17.16b \n" // sum01 "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "fmla v16.4s, %8.4s, v12.4s \n" "fmla v17.4s, %8.4s, v13.4s \n" "add %1, %1, #32 \n" "fmla v18.4s, %9.4s, v13.4s \n" "fmla v19.4s, %9.4s, v14.4s \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2] \n" // r10 r11 r12 r13 "fmla v16.4s, %10.4s, v14.4s \n" "fmla v17.4s, %10.4s, v15.4s \n" "add %2, %2, #32 \n" "fmla v18.4s, %11.4s, v20.4s \n" "fmla v19.4s, %11.4s, v21.4s \n" "fmla v16.4s, %12.4s, v21.4s \n" "fmla v17.4s, %12.4s, v22.4s \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%3] \n" // r20 r21 r22 r23 "fmla v18.4s, %13.4s, v22.4s \n" "fmla v19.4s, %13.4s, v23.4s \n" "fmla v16.4s, %14.4s, v12.4s \n" "fmla v17.4s, %14.4s, v13.4s \n" "fmla v18.4s, %15.4s, v13.4s \n" "fmla v19.4s, %15.4s, v14.4s \n" "fmla v16.4s, %16.4s, v14.4s \n" "fmla v17.4s, %16.4s, v15.4s \n" "add %3, %3, #32 \n" "fadd v16.4s, v16.4s, v18.4s \n" "fadd v17.4s, v17.4s, v19.4s \n" "st1 {v16.4s, v17.4s}, [%0], #32 \n" : "=r"(outptr0), // 
%0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22), // %16 "w"(_bias0) // %17 : "memory", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); #else asm volatile( "pld [%1, #256] \n" "vld1.f32 {d24-d27}, [%1 :128]! \n" // r00 r01 "vmov q10, %q17 \n" // sum00 "vmov q11, %q17 \n" // sum01 "vmla.f32 q10, %q8, q12 \n" "vmla.f32 q11, %q8, q13 \n" "pld [%1, #256] \n" "vld1.f32 {d28-d31}, [%1 :128] \n" // r02 r03 "vmla.f32 q10, %q9, q13 \n" "vmla.f32 q11, %q9, q14 \n" "vmla.f32 q10, %q10, q14 \n" "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n" // r10 r11 "vmla.f32 q11, %q10, q15 \n" "vmla.f32 q10, %q11, q12 \n" "vmla.f32 q11, %q11, q13 \n" "pld [%2, #256] \n" "vld1.f32 {d28-d31}, [%2 :128] \n" // r12 r13 "vmla.f32 q10, %q12, q13 \n" "vmla.f32 q11, %q12, q14 \n" "vmla.f32 q10, %q13, q14 \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]! \n" // r20 r21 "vmla.f32 q11, %q13, q15 \n" "vmla.f32 q10, %q14, q12 \n" "vmla.f32 q11, %q14, q13 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128] \n" // r22 r23 "vmla.f32 q10, %q15, q13 \n" "vmla.f32 q11, %q15, q14 \n" "vmla.f32 q10, %q16, q14 \n" "vmla.f32 q11, %q16, q15 \n" "vst1.f32 {d20-d23}, [%0 :128]! 
\n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22), // %16 "w"(_bias0) // %17 : "memory", "q10", "q11", "q12", "q13", "q14", "q15"); #endif } for (; j < outw; j++) { float32x4_t _sum0 = _bias0; float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r11 = vld1q_f32(r1 + 4); float32x4_t _r12 = vld1q_f32(r1 + 8); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r21 = vld1q_f32(r2 + 4); float32x4_t _r22 = vld1q_f32(r2 + 8); _sum0 = vmlaq_f32(_sum0, _k00, _r00); _sum0 = vmlaq_f32(_sum0, _k01, _r01); _sum0 = vmlaq_f32(_sum0, _k02, _r02); _sum0 = vmlaq_f32(_sum0, _k10, _r10); _sum0 = vmlaq_f32(_sum0, _k11, _r11); _sum0 = vmlaq_f32(_sum0, _k12, _r12); _sum0 = vmlaq_f32(_sum0, _k20, _r20); _sum0 = vmlaq_f32(_sum0, _k21, _r21); _sum0 = vmlaq_f32(_sum0, _k22, _r22); vst1q_f32(outptr0, _sum0); r0 += 4; r1 += 4; r2 += 4; outptr0 += 4; } r0 += 2 * 4; r1 += 2 * 4; r2 += 2 * 4; } } } static void convdw3x3s2_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; const int group = bottom_blob.c; const int tailstep = (w - 2 * outw + w) * 4; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); float32x4_t _bias0 = bias ? 
vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f); const float* k0 = kernel.row(g); float* outptr0 = out; const Mat img0 = bottom_blob.channel(g); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); float32x4_t _k00 = vld1q_f32(k0); float32x4_t _k01 = vld1q_f32(k0 + 4); float32x4_t _k02 = vld1q_f32(k0 + 8); float32x4_t _k10 = vld1q_f32(k0 + 12); float32x4_t _k11 = vld1q_f32(k0 + 16); float32x4_t _k12 = vld1q_f32(k0 + 20); float32x4_t _k20 = vld1q_f32(k0 + 24); float32x4_t _k21 = vld1q_f32(k0 + 28); float32x4_t _k22 = vld1q_f32(k0 + 32); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #512] \n" "ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%1], #64 \n" // r00 r01 r02 r03 "mov v28.16b, %17.16b \n" // sum00 "mov v29.16b, %17.16b \n" // sum01 "mov v30.16b, %17.16b \n" // sum02 "mov v31.16b, %17.16b \n" // sum03 "prfm pldl1keep, [%1, #512] \n" "ld1 {v14.4s, v15.4s, v16.4s, v17.4s}, [%1], #64 \n" // r04 r05 r06 r07 "fmla v28.4s, %8.4s, v10.4s \n" "fmla v29.4s, %8.4s, v12.4s \n" "fmla v30.4s, %8.4s, v14.4s \n" "fmla v31.4s, %8.4s, v16.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v18.4s}, [%1] \n" // r08 "fmla v28.4s, %9.4s, v11.4s \n" "fmla v29.4s, %9.4s, v13.4s \n" "fmla v30.4s, %9.4s, v15.4s \n" "fmla v31.4s, %9.4s, v17.4s \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" // r10 r11 r12 r13 "fmla v28.4s, %10.4s, v12.4s \n" "fmla v29.4s, %10.4s, v14.4s \n" "fmla v30.4s, %10.4s, v16.4s \n" "fmla v31.4s, %10.4s, v18.4s \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" // r14 r15 r16 r17 "fmla v28.4s, %11.4s, v20.4s \n" "fmla v29.4s, %11.4s, v22.4s \n" "fmla v30.4s, %11.4s, v24.4s \n" "fmla v31.4s, %11.4s, v26.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v19.4s}, [%2] \n" // r18 "fmla v28.4s, %12.4s, v21.4s \n" "fmla v29.4s, %12.4s, v23.4s \n" "fmla v30.4s, %12.4s, v25.4s \n" 
"fmla v31.4s, %12.4s, v27.4s \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%3], #64 \n" // r20 r21 r22 r23 "fmla v28.4s, %13.4s, v22.4s \n" "fmla v29.4s, %13.4s, v24.4s \n" "fmla v30.4s, %13.4s, v26.4s \n" "fmla v31.4s, %13.4s, v19.4s \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v14.4s, v15.4s, v16.4s, v17.4s}, [%3], #64 \n" // r24 r25 r26 r27 "fmla v28.4s, %14.4s, v10.4s \n" "fmla v29.4s, %14.4s, v12.4s \n" "fmla v30.4s, %14.4s, v14.4s \n" "fmla v31.4s, %14.4s, v16.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v18.4s}, [%3] \n" // r28 "fmla v28.4s, %15.4s, v11.4s \n" "fmla v29.4s, %15.4s, v13.4s \n" "fmla v30.4s, %15.4s, v15.4s \n" "fmla v31.4s, %15.4s, v17.4s \n" "fmla v28.4s, %16.4s, v12.4s \n" "fmla v29.4s, %16.4s, v14.4s \n" "fmla v30.4s, %16.4s, v16.4s \n" "fmla v31.4s, %16.4s, v18.4s \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22), // %16 "w"(_bias0) // %17 : "memory", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); #else asm volatile( "pld [%1, #256] \n" "vld1.f32 {d28-d31}, [%1 :128]! \n" // r00 r01 "vmov q10, %q17 \n" // sum00 "vmla.f32 q10, %q8, q14 \n" "vmov q11, %q17 \n" // sum01 "vmla.f32 q10, %q9, q15 \n" "pld [%1, #256] \n" "vld1.f32 {d28-d31}, [%1 :128]! \n" // r02 r03 "vmla.f32 q11, %q8, q14 \n" "vmla.f32 q10, %q10, q14 \n" "vmov q12, %q17 \n" // sum02 "vmla.f32 q11, %q9, q15 \n" "pld [%1, #256] \n" "vld1.f32 {d28-d31}, [%1 :128]! \n" // r04 r05 "vmla.f32 q12, %q8, q14 \n" "vmla.f32 q11, %q10, q14 \n" "vmla.f32 q12, %q9, q15 \n" "pld [%2, #256] \n" "vld1.f32 {d28-d31}, [%2 :128]! 
\n" // r10 r11 "vmla.f32 q10, %q11, q14 \n" "vmov q13, %q17 \n" // sum03 "vmla.f32 q10, %q12, q15 \n" "pld [%1, #256] \n" "vld1.f32 {d28-d31}, [%1 :128]! \n" // r06 r07 "vmla.f32 q13, %q8, q14 \n" "vmla.f32 q12, %q10, q14 \n" "vmla.f32 q13, %q9, q15 \n" "pld [%2, #256] \n" "vld1.f32 {d28-d31}, [%2 :128]! \n" // r12 r13 "vmla.f32 q11, %q11, q14 \n" "vmla.f32 q10, %q13, q14 \n" "vmla.f32 q11, %q12, q15 \n" "vld1.f32 {d28-d29}, [%1 :128] \n" // r08 "vmla.f32 q13, %q10, q14 \n" "pld [%2, #256] \n" "vld1.f32 {d28-d31}, [%2 :128]! \n" // r14 r15 "vmla.f32 q12, %q11, q14 \n" "vmla.f32 q11, %q13, q14 \n" "vmla.f32 q12, %q12, q15 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n" // r20 r21 "vmla.f32 q10, %q14, q14 \n" "vmla.f32 q10, %q15, q15 \n" "pld [%2, #256] \n" "vld1.f32 {d28-d31}, [%2 :128]! \n" // r16 r17 "vmla.f32 q13, %q11, q14 \n" "vmla.f32 q12, %q13, q14 \n" "vmla.f32 q13, %q12, q15 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n" // r22 r23 "vmla.f32 q11, %q14, q14 \n" "vmla.f32 q10, %q16, q14 \n" "vmla.f32 q11, %q15, q15 \n" "vld1.f32 {d28-d29}, [%2 :128] \n" // r18 "vmla.f32 q13, %q13, q14 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n" // r24 r25 "vmla.f32 q12, %q14, q14 \n" "vmla.f32 q11, %q16, q14 \n" "vmla.f32 q12, %q15, q15 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! 
\n" // r26 r27 "vmla.f32 q13, %q14, q14 \n" "vmla.f32 q12, %q16, q14 \n" "vmla.f32 q13, %q15, q15 \n" "vld1.f32 {d28-d29}, [%3 :128] \n" // r28 "vmla.f32 q13, %q16, q14 \n" "vstm %0!, {d20-d27} \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22), // %16 "w"(_bias0) // %17 : "memory", "q10", "q11", "q12", "q13", "q14", "q15"); #endif } for (; j + 1 < outw; j += 2) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%1, #512] \n" "ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%1], #64 \n" // r00 r01 r02 r03 "mov v20.16b, %17.16b \n" // sum00 "mov v21.16b, %17.16b \n" // sum01 "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "fmla v20.4s, %8.4s, v10.4s \n" "fmla v21.4s, %8.4s, v12.4s \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v14.4s}, [%1] \n" // r04 "fmla v22.4s, %9.4s, v11.4s \n" "fmla v23.4s, %9.4s, v13.4s \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%2], #64 \n" // r10 r11 r12 r13 "fmla v20.4s, %10.4s, v12.4s \n" "fmla v21.4s, %10.4s, v14.4s \n" "fmla v22.4s, %11.4s, v16.4s \n" "fmla v23.4s, %11.4s, v18.4s \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v15.4s}, [%2] \n" // r14 "fmla v20.4s, %12.4s, v17.4s \n" "fmla v21.4s, %12.4s, v19.4s \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%3], #64 \n" // r20 r21 r22 r23 "fmla v22.4s, %13.4s, v18.4s \n" "fmla v23.4s, %13.4s, v15.4s \n" "fmla v20.4s, %14.4s, v10.4s \n" "fmla v21.4s, %14.4s, v12.4s \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v14.4s}, [%3] \n" // r24 "fmla v22.4s, %15.4s, v11.4s \n" "fmla v23.4s, %15.4s, v13.4s \n" "fmla v20.4s, %16.4s, v12.4s \n" "fmla v21.4s, %16.4s, v14.4s \n" "fadd v20.4s, v20.4s, v22.4s \n" "fadd v21.4s, v21.4s, v23.4s \n" "st1 {v20.4s, v21.4s}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), 
// %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22), // %16 "w"(_bias0) // %17 : "memory", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); #else asm volatile( "pld [%1, #256] \n" "vld1.f32 {d24-d27}, [%1 :128]! \n" // r00 r01 "vmov q10, %q17 \n" // sum00 "vmov q11, %q17 \n" // sum01 "vmla.f32 q10, %q8, q12 \n" "pld [%1, #256] \n" "vld1.f32 {d28-d31}, [%1 :128]! \n" // r02 r03 "vmla.f32 q10, %q9, q13 \n" "vmla.f32 q11, %q8, q14 \n" "vmla.f32 q10, %q10, q14 \n" "vld1.f32 {d24-d25}, [%1 :128] \n" // r04 "vmla.f32 q11, %q9, q15 \n" "pld [%2, #256] \n" "vld1.f32 {d28-d31}, [%2 :128]! \n" // r10 r11 "vmla.f32 q11, %q10, q12 \n" "vmla.f32 q10, %q11, q14 \n" "pld [%2, #256] \n" "vld1.f32 {d24-d27}, [%2 :128]! \n" // r12 r13 "vmla.f32 q10, %q12, q15 \n" "vmla.f32 q11, %q11, q12 \n" "vmla.f32 q10, %q13, q12 \n" "vld1.f32 {d28-d29}, [%2 :128] \n" // r14 "vmla.f32 q11, %q12, q13 \n" "pld [%3, #256] \n" "vld1.f32 {d24-d27}, [%3 :128]! \n" // r20 r21 "vmla.f32 q11, %q13, q14 \n" "vmla.f32 q10, %q14, q12 \n" "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3 :128]! \n" // r22 r23 "vmla.f32 q10, %q15, q13 \n" "vmla.f32 q11, %q14, q14 \n" "vmla.f32 q10, %q16, q14 \n" "vld1.f32 {d24-d25}, [%3 :128] \n" // r24 "vmla.f32 q11, %q15, q15 \n" "vmla.f32 q11, %q16, q12 \n" "vst1.f32 {d20-d23}, [%0 :128]! 
\n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22), // %16 "w"(_bias0) // %17 : "memory", "q10", "q11", "q12", "q13", "q14", "q15"); #endif } for (; j < outw; j++) { float32x4_t _sum0 = _bias0; float32x4_t _r00 = vld1q_f32(r0); float32x4_t _r01 = vld1q_f32(r0 + 4); float32x4_t _r02 = vld1q_f32(r0 + 8); float32x4_t _r10 = vld1q_f32(r1); float32x4_t _r11 = vld1q_f32(r1 + 4); float32x4_t _r12 = vld1q_f32(r1 + 8); float32x4_t _r20 = vld1q_f32(r2); float32x4_t _r21 = vld1q_f32(r2 + 4); float32x4_t _r22 = vld1q_f32(r2 + 8); _sum0 = vmlaq_f32(_sum0, _k00, _r00); _sum0 = vmlaq_f32(_sum0, _k01, _r01); _sum0 = vmlaq_f32(_sum0, _k02, _r02); _sum0 = vmlaq_f32(_sum0, _k10, _r10); _sum0 = vmlaq_f32(_sum0, _k11, _r11); _sum0 = vmlaq_f32(_sum0, _k12, _r12); _sum0 = vmlaq_f32(_sum0, _k20, _r20); _sum0 = vmlaq_f32(_sum0, _k21, _r21); _sum0 = vmlaq_f32(_sum0, _k22, _r22); vst1q_f32(outptr0, _sum0); r0 += 2 * 4; r1 += 2 * 4; r2 += 2 * 4; outptr0 += 4; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } }
pmpfft.c
/* MPI/PFFT/FFTW particle-mesh FFT wrapper for FastPM. */
#include <string.h>
#include <mpi.h>
#include <fftw3.h>
#include <fftw3-mpi.h>
#include <pfft.h>
#include <fastpm/libfastpm.h>
#include <fastpm/logging.h>
#include <fastpm/transfer.h>
#include "pmpfft.h"

/* MPI datatype matching ptrdiff_t; doubles as the module "initialized"
 * flag (stays 0 until pm_module_init() runs). */
static MPI_Datatype MPI_PTRDIFF = (MPI_Datatype) 0;

/* Select double- or single-precision PFFT / FFTW-MPI entry points at
 * compile time behind a common set of macro names. */
#if FASTPM_FFT_PRECISION == 64
#define plan_dft_r2c pfft_plan_dft_r2c
#define plan_dft_c2r pfft_plan_dft_c2r
#define execute_dft_r2c pfft_execute_dft_r2c
#define execute_dft_c2r pfft_execute_dft_c2r
#define plan_dft_r2c_fftw fftw_mpi_plan_dft_r2c
#define plan_dft_c2r_fftw fftw_mpi_plan_dft_c2r
#define execute_dft_r2c_fftw fftw_mpi_execute_dft_r2c
#define execute_dft_c2r_fftw fftw_mpi_execute_dft_c2r
#define _pfft_init pfft_init
#define _pfft_cleanup pfft_cleanup
#define destroy_plan pfft_destroy_plan
#define destroy_plan_fftw fftw_destroy_plan
#elif FASTPM_FFT_PRECISION == 32
#define plan_dft_r2c pfftf_plan_dft_r2c
#define plan_dft_c2r pfftf_plan_dft_c2r
#define plan_dft_r2c_fftw fftwf_mpi_plan_dft_r2c
#define plan_dft_c2r_fftw fftwf_mpi_plan_dft_c2r
#define execute_dft_r2c pfftf_execute_dft_r2c
#define execute_dft_c2r pfftf_execute_dft_c2r
#define execute_dft_r2c_fftw fftwf_mpi_execute_dft_r2c
#define execute_dft_c2r_fftw fftwf_mpi_execute_dft_c2r
#define _pfft_init pfftf_init
#define _pfft_cleanup pfftf_cleanup
#define destroy_plan pfftf_destroy_plan
#define destroy_plan_fftw fftwf_destroy_plan
#endif

/* One-time module initialization: starts PFFT and picks the MPI datatype
 * used to communicate ptrdiff_t values.  Idempotent.
 * NOTE(review): MPI_LONG is 4 bytes on LLP64 (Windows) even when
 * sizeof(ptrdiff_t) == 8 -- presumably only LP64 platforms are targeted;
 * confirm, or consider MPI_AINT. */
void pm_module_init() {
    if(MPI_PTRDIFF) return;

    _pfft_init();

    if(sizeof(ptrdiff_t) == 8) {
        MPI_PTRDIFF = MPI_LONG;
    } else {
        MPI_PTRDIFF = MPI_INT;
    }
}

/* Tears down PFFT and resets the init flag.  No-op if never initialized. */
void pm_module_cleanup() {
    if(!MPI_PTRDIFF) return;

    _pfft_cleanup();
    MPI_PTRDIFF = (MPI_Datatype) 0;
}

/* Computes the local r2c domain decomposition with FFTW-MPI but reports
 * it through a PFFT-style (isize/istart/osize/ostart) interface.
 * Returns the per-rank allocation size in complex elements. */
static size_t
fftw_local_size_dft_r2c(
        int nrnk, ptrdiff_t * n, MPI_Comm comm, int flags,
        ptrdiff_t * isize, ptrdiff_t * istart,
        ptrdiff_t * osize, ptrdiff_t * ostart
        )
{
    size_t allocsize;
    ptrdiff_t n2[nrnk];
    int i;
    for(i = 0; i < nrnk; i++ ) {
        n2[i] = n[i];
    }
    /* r2c is always padded !*/
    n2[nrnk - 1] = n[nrnk - 1] / 2 + 1;

    /* translate to a compatible interface with
PFFT */
    for(i = 0; i < nrnk; i ++ ){
        istart[i] = 0;
        ostart[i] = 0;
        isize[i] = n2[i];
        if(i == nrnk - 1) {
            /* real input */
            isize[i] *= 2;
        }
        osize[i] = n2[i];
    }
    if(FFTW_MPI_TRANSPOSED_OUT & flags) {
        /* transposed output: FFTW reports the y-distributed output slab */
        allocsize = fftw_mpi_local_size_transposed(
                3, n2, comm,
                &isize[0], &istart[0],
                &osize[1], &ostart[1]);
    } else {
        allocsize = fftw_mpi_local_size(
                3, n2, comm, &isize[0], &istart[0]);
        osize[0] = isize[0];
        osize[1] = isize[1];
    }
    return allocsize;
}

/* Initializes a PM object: chooses the 2d process mesh, sets up the mesh
 * geometry, queries the local FFT domain decomposition, and (further down)
 * builds the FFT plans and lookup tables. */
void pm_init(PM * pm, PMInit * init, MPI_Comm comm) {
    pm->init = *init;
    pm->mem = _libfastpm_get_gmem();

    /* initialize the domain */
    MPI_Comm_rank(comm, &pm->ThisTask);
    MPI_Comm_size(comm, &pm->NTask);

    int Ny = init->NprocY;
    int Nx;
    if(Ny <= 0) {
        Ny = 1;
        Nx = pm->NTask;
        if(!init->use_fftw) {
            /* pick a near-square mesh: raise Ny to ceil(sqrt(NTask)),
             * then lower it to the nearest divisor of NTask */
            for(; Ny * Ny < pm->NTask; Ny ++) continue;
            for(; Ny >= 1; Ny--) {
                if (pm->NTask % Ny == 0) break;
                continue;
            }
        }
    } else {
        if(pm->NTask % Ny != 0) {
            fastpm_raise(-1, "NprocY(%d) and NTask(%d) is incompatible\n", Ny, pm->NTask);
        }
    }
    Nx = pm->NTask / Ny;
    pm->Nproc[0] = Nx;
    pm->Nproc[1] = Ny;

    /* FFTW-MPI only supports a 1d (slab) decomposition */
    if(init->use_fftw) {
        if(Ny != 1) {
            fastpm_raise(-1, "FFTW requires Ny == 1; Ny = %d\n", Ny);
        }
    }
    int d;

    if(init->Nmesh % 2 != 0) {
        fastpm_raise(-1, "Nmesh must be even, but %d is odd.\n", init->Nmesh);
    }
    pm->Norm = 1.0;
    pm->Volume = 1.0;
    for(d = 0; d < 3; d ++) {
        pm->Nmesh[d] = init->Nmesh;
        pm->BoxSize[d] = init->BoxSize;
        pm->CellSize[d] = pm->BoxSize[d] / pm->Nmesh[d];
        pm->InvCellSize[d] = 1.0 / pm->CellSize[d];
        pm->Norm *= pm->Nmesh[d];
        pm->Volume *= pm->BoxSize[d];
    }

    pfft_create_procmesh(2, comm, pm->Nproc, &pm->Comm2D);

    /* allocsize is doubled: it is counted in real (FastPMFloat) elements,
     * two per complex value */
    if(init->use_fftw) {
        pm->allocsize = 2 * fftw_local_size_dft_r2c(
                3, pm->Nmesh, pm->Comm2D,
                (pm->init.transposed?FFTW_MPI_TRANSPOSED_OUT:0),
                pm->IRegion.size, pm->IRegion.start,
                pm->ORegion.size, pm->ORegion.start);
    } else {
        pm->allocsize = 2 * pfft_local_size_dft_r2c(
                3, pm->Nmesh, pm->Comm2D,
                (pm->init.transposed?PFFT_TRANSPOSED_OUT:0) | PFFT_PADDED_R2C,
                pm->IRegion.size, pm->IRegion.start,
                pm->ORegion.size, pm->ORegion.start);
    }

    /*
Note that we need to fix up the padded size of the real data;
     * and transpose with strides ,
     */
    /* real-space region: row-major x, y, z with the last dimension padded */
    pm->IRegion.strides[2] = 1;
    pm->IRegion.strides[1] = pm->IRegion.size[2];
    pm->IRegion.strides[0] = pm->IRegion.size[1] * pm->IRegion.strides[1];
    pm->IRegion.total = pm->IRegion.size[0] * pm->IRegion.strides[0];

    /* remove padding from the view */
    pm->IRegion.size[2] = pm->Nmesh[2];

    if(pm->init.transposed) {
        if(pm->init.use_fftw) {
            /* FFTW transposed, y, x, z */
            pm->ORegion.strides[2] = 1;
            pm->ORegion.strides[0] = pm->ORegion.size[2];
            pm->ORegion.strides[1] = pm->ORegion.size[0] * pm->ORegion.strides[0];
            pm->ORegion.total = pm->ORegion.size[1] * pm->ORegion.strides[1];
        } else {
            /* PFFT transposed, y, z, x */
            pm->ORegion.strides[0] = 1;
            pm->ORegion.strides[2] = pm->ORegion.size[0];
            pm->ORegion.strides[1] = pm->ORegion.size[2] * pm->ORegion.strides[2];
            pm->ORegion.total = pm->ORegion.size[1] * pm->ORegion.strides[1];
        }
    } else {
        /* non-transposed */
        pm->ORegion.strides[2] = 1;
        pm->ORegion.strides[1] = pm->ORegion.size[2];
        pm->ORegion.strides[0] = pm->ORegion.size[1] * pm->ORegion.strides[1];
        pm->ORegion.total = pm->ORegion.size[0] * pm->ORegion.strides[0];
    }

    /* build, per decomposed direction, the cumulative mesh edges of every
     * rank column/row and a mesh-index -> cartesian-rank lookup table */
    for(d = 0; d < 2; d ++) {
        MPI_Comm projected;
        int remain_dims[2] = {0, 0};
        remain_dims[d] = 1;

        pm->Grid.edges_int[d] = malloc(sizeof(pm->Grid.edges_int[0][0]) * (pm->Nproc[d] + 1));
        pm->Grid.edges_float[d] = malloc(sizeof(pm->Grid.edges_float[0][0]) * (pm->Nproc[d] + 1));

        pm->Grid.MeshtoCart[d] = malloc(sizeof(int) * pm->Nmesh[d]);

        /* Here we sum the size instead of using the offset,
         * because pfft reports local_i_start and local_ni of zero.
         * if the rank does not contain any blocks.
         Using the offsets
         * will cause non-increasing edges.*/
        MPI_Cart_sub(pm->Comm2D, remain_dims, &projected);
        MPI_Allgather(&pm->IRegion.size[d], 1, MPI_PTRDIFF,
            pm->Grid.edges_int[d], 1, MPI_PTRDIFF, projected);
        MPI_Comm_free(&projected);

        /* in-place exclusive prefix sum: sizes -> starting edges */
        int j;
        ptrdiff_t sum = 0;
        for(j = 0; j <= pm->Nproc[d]; j ++) {
            ptrdiff_t sum1 = sum + pm->Grid.edges_int[d][j];
            pm->Grid.edges_int[d][j] = sum;
            sum = sum1;
        }
        for(j = 0; j <= pm->Nproc[d]; j ++) {
            pm->Grid.edges_float[d][j] = 1.0 * pm->Grid.edges_int[d][j] / pm->Nmesh[d] * pm->BoxSize[d];
        }
        /* Last edge is at the edge of the box */
        int last = pm->Nproc[d];
        pm->Grid.edges_float[d][last] = pm->BoxSize[d];
        if (pm->Grid.edges_int[d][last] != pm->Nmesh[d]) {
            fastpm_raise(-1, "last edge did not match Nmesh. Internal Error.\n");
        };
        /* fill in the look up table */
        for(j = 0; j < pm->Nproc[d]; j ++) {
            int i;
            for(i = pm->Grid.edges_int[d][j]; i < pm->Grid.edges_int[d][j+1]; i ++) {
                pm->Grid.MeshtoCart[d][i] = j;
            }
        }
    }

    /* scratch buffers are only needed while planning */
    FastPMFloat * canvas = pm_alloc(pm);
    FastPMFloat * workspace = pm_alloc(pm);

    if(pm->init.use_fftw) {
        pm->r2c = plan_dft_r2c_fftw(
                3, pm->Nmesh, (void*) workspace, (void*) canvas,
                pm->Comm2D,
                (pm->init.transposed?FFTW_MPI_TRANSPOSED_OUT:0)
                | FFTW_ESTIMATE
                | FFTW_DESTROY_INPUT
                );
        pm->c2r = plan_dft_c2r_fftw(
                3, pm->Nmesh, (void*) canvas, (void*) canvas,
                pm->Comm2D,
                (pm->init.transposed?FFTW_MPI_TRANSPOSED_IN:0)
                | FFTW_ESTIMATE
                | FFTW_DESTROY_INPUT
                );
    } else {
        pm->r2c = plan_dft_r2c(
                3, pm->Nmesh, (void*) workspace, (void*) canvas,
                pm->Comm2D,
                PFFT_FORWARD,
                (pm->init.transposed?PFFT_TRANSPOSED_OUT:0)
                | PFFT_PADDED_R2C
                | PFFT_ESTIMATE
                | PFFT_TUNE
                //| PFFT_MEASURE
                | PFFT_DESTROY_INPUT
                );
        pm->c2r = plan_dft_c2r(
                3, pm->Nmesh, (void*) workspace, (void*) workspace,
                pm->Comm2D,
                PFFT_BACKWARD,
                (pm->init.transposed?PFFT_TRANSPOSED_IN:0)
                | PFFT_PADDED_C2R
                | PFFT_ESTIMATE
                //| PFFT_MEASURE
                | PFFT_TUNE
                | PFFT_DESTROY_INPUT
                );
    }

    pm_free(pm, workspace);
    pm_free(pm, canvas);

    /* per-dimension mesh-index -> wavenumber table */
    for(d = 0; d < 3; d++) {
        pm->MeshtoK[d] = malloc(pm->Nmesh[d] *
sizeof(double)); int i; for(i = 0; i < pm->Nmesh[d]; i++) { int ii = i; if(ii >= pm->Nmesh[d] / 2) { ii -= pm->Nmesh[d]; } pm->MeshtoK[d][i] = ii * 2 * M_PI / pm->BoxSize[d]; } } } void pm_destroy(PM * pm) { int d; if(pm->init.use_fftw) { destroy_plan_fftw(pm->r2c); destroy_plan_fftw(pm->c2r); } else { destroy_plan(pm->r2c); destroy_plan(pm->c2r); } for(d = 0; d < 3; d++) { free(pm->MeshtoK[d]); } for(d = 0; d < 2; d++) { free(pm->Grid.MeshtoCart[d]); free(pm->Grid.edges_int[d]); free(pm->Grid.edges_float[d]); } MPI_Comm_free(&pm->Comm2D); } int pm_pos_to_rank(PM * pm, double pos[3]) { int d; int ipos[3]; for(d = 0; d < 2; d ++) { ipos[d] = floor(pos[d] * pm->InvCellSize[d]); } return pm_ipos_to_rank(pm, ipos); } int pm_ipos_to_rank(PM * pm, int i[3]) { int d; int rank2d[2]; for(d = 0; d < 2; d ++) { int ipos = i[d]; if(UNLIKELY(ipos < 0)) { ipos = ipos % pm->Nmesh[d]; if(ipos < 0) { ipos += pm->Nmesh[0]; } } if(UNLIKELY(ipos >= pm->Nmesh[d])) { ipos = ipos % pm->Nmesh[d]; } rank2d[d] = pm->Grid.MeshtoCart[d][ipos]; } return rank2d[0] * pm->Nproc[1] + rank2d[1]; } void pm_r2c(PM * pm, FastPMFloat * from, FastPMFloat * to) { VALGRIND_CHECK_MEM_IS_DEFINED(from, sizeof(from[0]) * pm->allocsize); /* A gaussian of variance 1 becomes a complex gausian of variance 1/2 * (1 / Norm) in real and imag */ /* workspace to canvas*/ if(pm->init.use_fftw) { execute_dft_r2c_fftw(pm->r2c, from, (void*)to); } else { execute_dft_r2c(pm->r2c, from, (void*)to); } ptrdiff_t i; #pragma omp parallel for for(i = 0; i < pm->allocsize; i ++) { to[i] *= 1 / pm->Norm; } VALGRIND_MAKE_MEM_DEFINED(to, sizeof(to[0]) * pm->allocsize); } void pm_c2r(PM * pm, FastPMFloat * inplace) { /* r2c and c2r round trip is unitary */ VALGRIND_CHECK_MEM_IS_DEFINED(inplace, sizeof(inplace[0]) * pm->allocsize); if(pm->init.use_fftw) { execute_dft_c2r_fftw(pm->c2r, (void*) inplace, inplace); } else { execute_dft_c2r(pm->c2r, (void*) inplace, inplace); } VALGRIND_MAKE_MEM_DEFINED(inplace, sizeof(inplace[0]) * 
pm->allocsize);
}

/* Decompose linear index `ind` into 3d indices i[] given `strides`,
 * peeling dimensions in the order d0, d1, d2 (largest stride first). */
#define unravel(ind, i, d0, d1, d2, strides) \
i[d0] = ind / strides[d0]; ind %= strides[d0]; \
i[d1] = ind / strides[d1]; ind %= strides[d1]; \
i[d2] = ind

void pm_unravel_o_index(PM * pm, ptrdiff_t ind, ptrdiff_t i[3]) {
    /*
     * using pm_unravel_o_index function is slower than pm_inc_o_index, thus it is only used
     * during dev to test pm_inc_o_index.
     * */
    ptrdiff_t tmp = ind;
    if(pm->init.transposed) {
        if(pm->init.use_fftw) {
            /* y, x, z*/
            unravel(tmp, i, 1, 0, 2, pm->ORegion.strides);
        } else {
            /* y, z, x*/
            unravel(tmp, i, 1, 2, 0, pm->ORegion.strides);
        }
    } else {
        unravel(tmp, i, 0, 1, 2, pm->ORegion.strides);
    }
}

/* Decompose a linear index in the (padded) real-space input region. */
void pm_unravel_i_index(PM * pm, ptrdiff_t ind, ptrdiff_t i[3]) {
    ptrdiff_t tmp = ind;
    unravel(tmp, i, 0, 1, 2, pm->IRegion.strides);
}

/* Linearize a 3d index in the fourier-space (output) region. */
ptrdiff_t pm_ravel_o_index(PM * pm, ptrdiff_t i[3]) {
    ptrdiff_t ind = 0;
    int d;
    for(d = 0; d < 3; d++) {
        ind += pm->ORegion.strides[d] * i[d];
    }
    return ind;
}

/* Linearize a 3d index in the real-space (input) region. */
ptrdiff_t pm_ravel_i_index(PM * pm, ptrdiff_t i[3]) {
    ptrdiff_t ind = 0;
    int d;
    for(d = 0; d < 3; d++) {
        ind += pm->IRegion.strides[d] * i[d];
    }
    return ind;
}

/* Advance a 3d index by one element, carrying d2 -> d1 -> d0. */
#define inc(i, d0, d1, d2, size) \
i[d2] ++; \
if(UNLIKELY(i[d2] == size[d2])) { \
    i[d2] = 0; i[d1] ++; \
    if(UNLIKELY(i[d1] == size[d1])) { \
        i[d1] = 0; \
        i[d0] ++; \
    } \
}

/* returns number of items moved in linear index */
int pm_inc_o_index(PM * pm, ptrdiff_t i[3]) {
    if(pm->init.transposed) {
        if(pm->init.use_fftw) {
            /* y, x, z */
            inc(i, 1, 0, 2, pm->ORegion.size);
        } else {
            /* y, z, x */
            inc(i, 1, 2, 0, pm->ORegion.size);
        }
    } else {
        /* x, y, z*/
        inc(i, 0, 1, 2, pm->ORegion.size);
    }
    return 1;
}

/* returns number of items moved in linear index */
int pm_inc_i_index(PM * pm, ptrdiff_t i[3]) {
    /* can't use the macro because of the padding */
    int rt = 1;
    i[2] ++;
    if(UNLIKELY(i[2] == pm->IRegion.size[2])) {
        /* the padding ! -- skip the r2c padding elements at end of row;
         * presumably 2 because the padded row is Nmesh+2 wide (TODO confirm) */
        rt += 2;
        i[2] = 0;
        i[1] ++;
        if(UNLIKELY(i[1] == pm->IRegion.size[1])) {
            i[1] = 0;
            i[0] ++;
        }
    }
    return rt;
}

/* Sparse variant of MPI_Alltoallv: falls back to point-to-point exchanges
 * when only a few ranks actually talk to many peers. */
int MPI_Alltoallv_sparse(void *sendbuf, int *sendcnts, int *sdispls,
MPI_Datatype sendtype, void *recvbuf, int *recvcnts,
        int *rdispls, MPI_Datatype recvtype, MPI_Comm comm)
{
    int ThisTask;
    int NTask;
    MPI_Comm_rank(comm, &ThisTask);
    MPI_Comm_size(comm, &NTask);

    {
        /* if the send is dense, use MPI_Alltoallv directly. */
        int i;
        size_t send_requests = 0;
        for(i = 0; i < NTask; i ++) {
            if(sendcnts[i] > 0) {
                send_requests ++;
            }
        }
        /* a rank counts as "dense" when it sends to more than 128 peers */
        int dense = send_requests > 128;
        MPI_Allreduce(MPI_IN_PLACE, &dense, 1, MPI_INT, MPI_SUM, comm);

        /* dense is number of ranks does a lot of sends. */
        if (dense > 0) {
            fastpm_info("Using MPI's Alltoallv");
            return MPI_Alltoallv(
                    sendbuf, sendcnts, sdispls, sendtype,
                    recvbuf, recvcnts, rdispls, recvtype, comm);
        }
        /* else */
    }
    fastpm_info("Using sparse Alltoallv");
    int PTask;
    int ngrp;

    /* smallest PTask with 2^PTask >= NTask; partners are paired by rank XOR */
    for(PTask = 0; NTask > (1 << PTask); PTask++);

    ptrdiff_t lb;
    ptrdiff_t send_elsize;
    ptrdiff_t recv_elsize;

    MPI_Type_get_extent(sendtype, &lb, &send_elsize);
    MPI_Type_get_extent(recvtype, &lb, &recv_elsize);

#ifndef NO_ISEND_IRECV_IN_DOMAIN
    int n_requests;
    MPI_Request requests[NTask * 2];
    n_requests = 0;

    /* post every receive first */
    for(ngrp = 0; ngrp < (1 << PTask); ngrp++) {
        int target = ThisTask ^ ngrp;

        if(target >= NTask) continue;
        if(recvcnts[target] == 0) continue;
        MPI_Irecv(
                ((char*) recvbuf) + recv_elsize * rdispls[target],
                recvcnts[target],
                recvtype, target, 101934, comm, &requests[n_requests++]);
    }

    MPI_Barrier(comm);
    /* not really necessary, but this will guarantee that all receives are
       posted before the sends, which helps the stability of MPI on
       bluegene, and perhaps some mpich1-clusters */

    for(ngrp = 0; ngrp < (1 << PTask); ngrp++) {
        int target = ThisTask ^ ngrp;
        if(target >= NTask) continue;
        if(sendcnts[target] == 0) continue;
        VALGRIND_CHECK_MEM_IS_DEFINED(sendbuf, send_elsize * sdispls[target]);
        MPI_Isend(((char*) sendbuf) + send_elsize * sdispls[target],
                sendcnts[target],
                sendtype, target, 101934, comm, &requests[n_requests++]);
    }

    MPI_Waitall(n_requests, requests, MPI_STATUSES_IGNORE);

    for(ngrp = 0; ngrp < (1 << PTask); ngrp++) {
        int target = ThisTask ^ ngrp;
        if(target >= NTask) continue;
        if(recvcnts[target] == 0) continue;
        VALGRIND_MAKE_MEM_DEFINED(recvbuf, recv_elsize * rdispls[target]);
    }
#else
    /* fallback: blocking pairwise exchange */
    for(ngrp = 0; ngrp < (1 << PTask); ngrp++) {
        int target = ThisTask ^ ngrp;
        if(target >= NTask) continue;
        if(sendcnts[target] == 0 && recvcnts[target] == 0) continue;
        VALGRIND_CHECK_MEM_IS_DEFINED(sendbuf, send_elsize * sdispls[target]);
        MPI_Sendrecv(((char*)sendbuf) + send_elsize * sdispls[target],
                sendcnts[target], sendtype,
                target, 101934,
                ((char*)recvbuf) + recv_elsize * rdispls[target],
                recvcnts[target], recvtype, target, 101934,
                comm, MPI_STATUS_IGNORE);
        VALGRIND_MAKE_MEM_DEFINED(recvbuf, recv_elsize * rdispls[target]);
    }
#endif
    /* ensure the collective-ness */
    MPI_Barrier(comm);

    return 0;
}
FirstDerivativeFirstOrder.c
/*! @file FirstDerivativeFirstOrder.c
    @author Debojyoti Ghosh
    @brief First order approximation to the first derivative
*/

#include <stdio.h>
#include <stdlib.h>
#include <basic.h>
#include <mathfunctions.h>
#include <arrayfunctions.h>
#include <firstderivative.h>
#include <mpivars.h>
#include <hypar.h>

/* local aliases for the MPI and solver context structures */
typedef MPIVariables  MPIContext;
typedef HyPar         SolverContext;

#ifdef with_omp
#include <omp.h>
#endif

/*! Computes the first-order finite-difference approximation to the first
    derivative (\b Note: not divided by the grid spacing):
    \f{equation}{
      \left(\partial f\right)_i
      = \left\{ \begin{array}{ll}
          f_{i+1} - f_i & {\rm bias} = 1 \\
          f_i - f_{i-1} & {\rm bias} = -1
        \end{array}\right.
    \f}
    where \f$i\f$ is the grid index along the spatial dimension of the derivative.
    \n\n
    Notes:
    + The first derivative is computed at the grid points or the cell centers.
    + The first derivative is computed at the ghost points too. Thus, biased
      schemes are used at and near the boundaries.
    + \b Df and \b f are 1D arrays containing the function and its computed
      derivatives on a multi-dimensional grid. The derivative along the
      specified dimension \b dir is computed by looping through all grid
      lines along \b dir.
*/
int FirstDerivativeFirstOrder(
                                double  *Df,  /*!< Array to hold the computed first derivative (with ghost points) */
                                double  *f,   /*!< Array containing the grid point function values whose first
                                                   derivative is to be computed (with ghost points) */
                                int     dir,  /*!< The spatial dimension along which the derivative is computed */
                                int     bias, /*!< Forward or backward differencing for non-central
                                                   finite-difference schemes (-1: backward, 1: forward)*/
                                void    *s,   /*!< Solver object of type #SolverContext */
                                void    *m    /*!< MPI object of type #MPIContext (unused here; kept for the
                                                   common first-derivative interface) */
                             )
{
  SolverContext *solver = (SolverContext*) s;
  int           i, j, v;

  int ghosts = solver->ghosts;
  int ndims  = solver->ndims;
  int nvars  = solver->nvars;
  int *dim   = solver->dim_local;

  if ((!Df) || (!f)) {
    /* FIX: the message previously named FirstDerivativeSecondOrder()
     * (copy-paste error from the second-order variant) */
    fprintf(stderr, "Error in FirstDerivativeFirstOrder(): input arrays not allocated.\n");
    return(1);
  }

  /* create index and bounds for the outer loop, i.e., to loop over all 1D lines along
     dimension "dir" */
  int indexC[ndims], index_outer[ndims], bounds_outer[ndims];
  _ArrayCopy1D_(dim,bounds_outer,ndims); bounds_outer[dir] = 1;
  int N_outer; _ArrayProduct1D_(bounds_outer,ndims,N_outer);

#pragma omp parallel for schedule(auto) default(shared) private(i,j,v,index_outer,indexC)
  for (j=0; j<N_outer; j++) {
    _ArrayIndexnD_(ndims,j,bounds_outer,index_outer,0);
    _ArrayCopy1D_(index_outer,indexC,ndims);

    /* left boundary: forward difference at the leftmost ghost point */
    for (i = -ghosts; i < -ghosts+1; i++) {
      int qC, qR;
      indexC[dir] = i  ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qC );
      indexC[dir] = i+1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qR );
      for (v=0; v<nvars; v++) Df[qC*nvars+v] = f[qR*nvars+v]-f[qC*nvars+v];
    }

    /* interior: one-sided difference selected by the sign of "bias" */
    for (i = -ghosts+1; i < dim[dir]+ghosts-1; i++) {
      int qC, qL, qR;
      indexC[dir] = i  ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qC );
      indexC[dir] = i-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qL);
      indexC[dir] = i+1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qR);
      for (v=0; v<nvars; v++)
        Df[qC*nvars+v] = max(bias,0)*f[qR*nvars+v]-bias*f[qC*nvars+v]+min(bias,0)*f[qL*nvars+v];
    }

    /* right boundary: backward difference at the rightmost ghost point */
    for (i = dim[dir]+ghosts-1; i < dim[dir]+ghosts; i++) {
      int qL, qC;
      indexC[dir] = i-1; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qL );
      indexC[dir] = i  ; _ArrayIndex1D_(ndims,dim,indexC,ghosts,qC );
      for (v=0; v<nvars; v++) Df[qC*nvars+v] = f[qC*nvars+v]-f[qL*nvars+v];
    }
  }

  return(0);
}
convolution_sgemm_pack1ton_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// GEMM-style inner product over im2col data for "pack1-to-n" fp16 convolution:
// input channels are unpacked (pack1), output channels are written packn at a
// time using RISC-V vector intrinsics.
static void im2col_sgemm_pack1ton_fp16sa_rvv(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    // csrr_vlenb() reads the vlenb CSR (vector register length in bytes);
    // dividing by 2 gives the number of fp16 lanes per vector register.
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    // Mat bottom_im2col(size, maxk, inch, 2u, 1, opt.workspace_allocator);

    const int size = bottom_im2col.w;   // number of output spatial positions
    const int maxk = bottom_im2col.h;   // kernel_w * kernel_h
    const int inch = bottom_im2col.c;

    const int outch = top_blob.c;

    const __fp16* bias = _bias;         // may be empty -> null pointer

    // permute
    // Reorder im2col data from (size, maxk, inch) to per-position channels so
    // that each output position's (inch*maxk) inputs are contiguous.
    Mat tmp;
    tmp.create(maxk, inch, size, 2u, 1, opt.workspace_allocator);
    {
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = 0; i < size; i++)
        {
            __fp16* tmpptr = tmp.channel(i);

            for (int q = 0; q < inch; q++)
            {
                const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i;

                for (int k = 0; k < maxk; k++)
                {
                    tmpptr[0] = img0[0];
                    img0 += size;   // rows of bottom_im2col are "size" wide
                    tmpptr += 1;
                }
            }
        }
    }

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr0 = top_blob.channel(p);

        int i = 0;
        for (; i < size; i++)
        {
            const __fp16* tmpptr = tmp.channel(i);
            const __fp16* kptr0 = kernel.channel(p);

            int nn = inch * maxk; // inch always > 0

            // Accumulator starts at zero, or at the bias for this group of
            // packn output channels when a bias is supplied.
            vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl);

            if (bias)
            {
                _sum = vle16_v_f16m1(bias + p * packn, vl);
            }

            // Scalar input broadcast against a packn-wide slice of weights:
            // _sum[lane] += val * w[lane] for each of the nn input values.
            for (int j = 0; j < nn; j++)
            {
                __fp16 val = *tmpptr++;
                vfloat16m1_t _w0 = vle16_v_f16m1(kptr0, vl);
                _sum = vfmacc_vf_f16m1(_sum, val, _w0, vl);
                kptr0 += packn;
            }

            vse16_v_f16m1(outptr0, _sum, vl);
            outptr0 += packn;
        }
    }
}

// Full convolution entry point: expands bottom_blob with im2col (honoring
// stride/dilation), then delegates the matrix multiply to the sgemm kernel
// above. Geometry (outw/outh) is taken from the pre-sized top_blob.
static void convolution_im2col_sgemm_pack1ton_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int size = outw * outh;
    const int maxk = kernel_w * kernel_h;

    // im2col
    Mat bottom_im2col(size, maxk, inch, 2u, 1, opt.workspace_allocator);
    {
        // gap: distance from the end of one output row's last sample to the
        // start of the next output row's first sample, in the input image.
        const int gap = w * stride_h - outw * stride_w;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            __fp16* ptr = bottom_im2col.channel(p);

            // One (outh x outw) plane per kernel tap (u,v), offset by dilation.
            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    const __fp16* sptr = img.row<const __fp16>(dilation_h * u) + dilation_w * v;

                    for (int i = 0; i < outh; i++)
                    {
                        int j = 0;
                        for (; j < outw; j++)
                        {
                            ptr[0] = sptr[0];
                            sptr += stride_w;
                            ptr += 1;
                        }
                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_pack1ton_fp16sa_rvv(bottom_im2col, top_blob, kernel, _bias, opt);
}
cq_fmt_plug.c
/* * This software is Copyright (c) Peter Kasza <peter.kasza at itinsight.hu>, * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_cq; #elif FMT_REGISTERS_H john_register_one(&fmt_cq); #else #include <string.h> #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 256 // core i7 no HT #endif #endif #include "arch.h" #include "misc.h" #include "params.h" #include "common.h" #include "formats.h" #include "options.h" #include "memdbg.h" #define FORMAT_LABEL "cq" #define FORMAT_NAME "ClearQuest" #define FORMAT_TAG "$cq$" #define TAG_LENGTH (sizeof(FORMAT_TAG) - 1) #define ALGORITHM_NAME "CQWeb" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 32 #define SALT_SIZE 64 // XXX double check this #define SALT_ALIGN MEM_ALIGN_NONE #define BINARY_SIZE 4 #define BINARY_ALIGN sizeof(uint32_t) #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 512 static struct fmt_tests cq_tests[] = { {"$cq$admin$a9db7ca6", ""}, {"$cq$admin$10200218", "admin"}, {"$cq$admin$4cfb73f2", "password"}, {"$cq$clearquest$a279b184", "clearquest"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_key)[BINARY_SIZE / sizeof(uint32_t)]; static char saved_salt[SALT_SIZE]; unsigned int AdRandomNumbers[2048] = { 0x7aa03f9e, 0x2be5e9c7, 0x1b5ceb7b, 0x32243048, 0x3cb12e04, 0xe90d2e8f, 0xace8842a, 0xdbc021e2, 0xdb7e4414, 0x9414168d, 0xec94d186, 0xb0d45b52, 0xefa8a505, 0xee4ac734, 0xee7f3583, 0xf37a1bd0, 0x258cd1c7, 0xa93a5bf7, 0x347a23f6, 0xbf68b272, 0x5c89e744, 0x4faa8fc0, 0x54fa4bc1, 0x8f4db7cc, 0xcc78a54c, 0x84012379, 0xbb997725, 0x234612c5, 0x9b8a7120, 0xa2ea15c9, 0xa1b03515, 0xd68c512c, 0x90cbbcb0, 0xa677c1d3, 0xad4edf73, 0x0fbb4c6c, 0x7a70637d, 0x920c3ef4, 0xc31f9edf, 0xc0c29c40, 0xe0547468, 0x8af5778a, 0x4910f9e4, 0x553744df, 0xe9e5d82f, 0x9f648de3, 
0xc366b6e0, 0xd2b1f83d, 0xca04733f, 0xcf3603b1, 0x27a7e70f, 0x9981f60a, 0xdd4e2cf8, 0xbcb811b3, 0x5d74ecc1, 0xa48e7106, 0x16539a54, 0xbdce7967, 0xf08c13d2, 0x2698c222, 0x3343d62d, 0xebf68da7, 0xc2bc9948, 0xea757b40, 0x37f0de67, 0xa03a4d89, 0x2cfc3cb7, 0x89230393, 0x6711f91d, 0x3812fb31, 0x9a105ad2, 0xf713d767, 0x4bd0d6c5, 0x4e4f9760, 0x9144e67b, 0x2b5b1540, 0xed6f8cd3, 0x1ab6de1d, 0x94fd67ae, 0x5fec29f0, 0x9d2f6c66, 0x701797be, 0x7ecd184e, 0x0e58eba6, 0x189fd557, 0xe70a447f, 0x3140d5ce, 0x46d55d28, 0xf77cb54a, 0x2eb11f3e, 0x331059a8, 0x1bcb4f0e, 0x253b1132, 0x9ed96195, 0xfadee553, 0x60939883, 0x76ea1724, 0x64ad91a9, 0xce605783, 0x5b17c228, 0xbb45f357, 0xee888899, 0xe36bda7e, 0xb1e6fdb6, 0xd47b22cb, 0xba2b4b6f, 0x31eb5d46, 0x72865ff5, 0xecc9077f, 0xe59c7a9a, 0xe2484234, 0x6c250954, 0x75bae5d1, 0x7d8ffc5c, 0x14aa2302, 0x0ea92748, 0xfc9fe3aa, 0x28295969, 0x2f38760d, 0xd335cab1, 0x7ffcaead, 0xcdeb00fc, 0x5aa4766f, 0xc57deab9, 0xfbeb9ac4, 0xfc59f737, 0xdc8a3d1d, 0xd5a04c8a, 0x63afc85c, 0x5a062866, 0x7feadb01, 0x9d29586e, 0x4bc63a4e, 0xc2e7a653, 0x80d132b7, 0x306d04e9, 0x27b4e1df, 0x2d4e5f39, 0xbc7f2f05, 0x8efb7bc4, 0x834368e0, 0xe37ac5c2, 0x07f7741c, 0x7d70f32b, 0xd8fc568c, 0x52e86793, 0x8a95993e, 0xc086bd26, 0xa0d0c8f9, 0x2262519a, 0xc7a79adc, 0x658ee58a, 0x94376223, 0x631fbdc3, 0x3433f9b0, 0xe469e054, 0x7b187772, 0x6ea94b34, 0x88515df2, 0xaa7c0e2b, 0xd688308e, 0x58f4d8de, 0xb3df5108, 0xa977f53d, 0x2f8bf273, 0x98c61997, 0x4c4f7cbc, 0x81a5a959, 0x8b38b887, 0x17c0d440, 0x03459240, 0x0e049148, 0xfa28dda2, 0xa364ab5b, 0x1f779ce8, 0x9013690f, 0xbf9b1284, 0x7a1704ac, 0x9deb5e71, 0x1b97bc67, 0x8560a7c3, 0xf95dc105, 0x5be1c16f, 0xa6f93b31, 0x9d540073, 0xbe36f269, 0xec1ad083, 0xb0d82bf0, 0xa362120c, 0x5d40e591, 0x4383ee00, 0xfc2bd2b3, 0x03415c36, 0x3514e8f4, 0x39fce373, 0xd8b0083c, 0xfaaf9375, 0xad0cf95b, 0x7618f7bd, 0xa1172c4f, 0xe4d6fc97, 0xd0d7d41c, 0x665d7461, 0x05a9ffd1, 0xdd7380d8, 0x2c334bab, 0xe89378a6, 0xe1b5a151, 0xb3d52dec, 0x3298d083, 0x2ef54a8c, 0x06f48c54, 
0x34172d75, 0xee7a4a5b, 0x0bcbb2c4, 0xedf58b49, 0x8bbc49f3, 0x3c5c2876, 0xf8faa9d2, 0xd5c27925, 0x8b31e871, 0xce021186, 0xff86fbb1, 0xce65d1a6, 0x2ddc62f1, 0x4be585e1, 0x69554205, 0x4a4144f0, 0xd8658572, 0x7ecdf4e5, 0xeca38dba, 0xba099ee0, 0x9b776c4e, 0x406aa6c7, 0x1b2920d2, 0x20a4fe17, 0x7eefab23, 0xf6cedf61, 0x580739bc, 0xdb0d8ee2, 0xbcceef67, 0xbfeaeedf, 0x396c4060, 0xccb400cc, 0x2e411cbb, 0x671672e1, 0x25a6139b, 0x9ee52aa1, 0x7494b0f1, 0xd967d261, 0x7c3351f6, 0xcb1b564c, 0x3ebd0cbf, 0x1e451dcb, 0x197de7e2, 0xb988e765, 0x4a56963b, 0x1c1c8571, 0xc77711b8, 0x56a92f0e, 0xb480334e, 0x283314d6, 0xa2905c9a, 0xfb7a96f1, 0x033f43a9, 0xea71059a, 0x266c4710, 0x4528c417, 0x6f3a6608, 0x45699d70, 0xd72db23a, 0x411dfdeb, 0xed52ad3d, 0x9c1cd588, 0x15e8e29d, 0xe36911aa, 0x2e65957b, 0x085d2e03, 0x419ee083, 0x0b257fb9, 0x30aa0a12, 0xdc7358a7, 0xaa9d75b8, 0xcc040ac1, 0x178035c6, 0x53916d0e, 0x54f00e79, 0xf5de4034, 0xd66ff28b, 0x77fd3f74, 0x512ca7d5, 0xd6e48b64, 0xbeb2e6e6, 0xfbbc9e4d, 0xfae7f661, 0x8d529091, 0xd43abeab, 0xc445eab2, 0x31c276ea, 0x3b02aa3c, 0x104555ae, 0xf31fca4c, 0x7c86bda3, 0xbb8fa8e1, 0x561ef23b, 0xc7c3ddac, 0xc049a3b2, 0xb55ce3c1, 0x793c8199, 0xaf9cf13b, 0xd1bedce3, 0x5c9d8d47, 0x975973cd, 0xcc747711, 0xecc9b8cd, 0xc38edb0e, 0x10d46466, 0xd1742665, 0x34de2abc, 0x6af0415f, 0x33eaabe7, 0xeb5b3c88, 0xe45dbb41, 0x46a60558, 0xec3ee7fb, 0x8648812f, 0x779004e2, 0x957369ad, 0x2859ec9e, 0xd7500913, 0x7ffdbaff, 0x653c7581, 0xcb2fbe4e, 0x889a0521, 0xc2c47e53, 0x27357d53, 0x6f8e3752, 0x14fed547, 0x78f92e55, 0x12168b52, 0x0cecbc88, 0xadc37f82, 0x5c65b902, 0x464adda3, 0x51701de4, 0x94f581be, 0x3a8bfe19, 0xd928e9fc, 0x4e621fd7, 0x463c7b1a, 0xa5d610c6, 0x5225860f, 0x31f71d52, 0x59da3e38, 0xf979401f, 0x457831b6, 0x19b19ff9, 0x212898b8, 0x9d9a330d, 0x9cbbc514, 0xb1b28903, 0x92161213, 0xcd13324f, 0xa7d072ff, 0x314eb9e8, 0xbf0981ce, 0x00212756, 0xa4aa1438, 0x4b8a6088, 0x35cfaec8, 0xdd1def33, 0x7a868772, 0x43faf1be, 0x7140cda5, 0x86a8ccf2, 0xaa95fc06, 0x19eaaee0, 0x309e6b2a, 
0x6943426b, 0x2c687aca, 0x9aa1e0dd, 0x5bf701cd, 0x129364b8, 0xeac81cbf, 0x3a1193ec, 0xd224cb9d, 0x0bbd5135, 0x82eb9c9d, 0x3c965dbf, 0xf6779412, 0xc71f63da, 0x28eedaa1, 0x63a4f5ba, 0x07cdd4e0, 0x072faeff, 0x21ff534d, 0x0d35cc3f, 0x4e0d4e2e, 0x75851a2f, 0x2e6779f2, 0xda7a104a, 0xbf129c30, 0x8b6305e9, 0x8d5bd156, 0xc3143454, 0x01f988a1, 0x55930029, 0xfc2f1619, 0xc4f9f598, 0x60c2ca05, 0x0c7138d9, 0xb3a34e45, 0x2f228b1a, 0x8ce79f7b, 0x0433eefb, 0x0bba45cc, 0xd9b8abdc, 0x77798336, 0xd46b5757, 0xf3f4308a, 0x8e4984c1, 0x18d3dff2, 0x43cd6cbb, 0x475cab00, 0xa0baa888, 0x7d688498, 0x33975596, 0xa8e3b619, 0x8258d065, 0x1e939418, 0xae47dcaf, 0x56a1311b, 0x41650078, 0x9ca8280b, 0x2df5bb50, 0x8ca64fba, 0x840e8608, 0x0ea9643d, 0x64666095, 0x76ae3ba8, 0x31409202, 0x5c076878, 0xa15154f5, 0xa33ad25c, 0x5d76ea5e, 0x89d98e0c, 0x7d0888c2, 0xce8ea846, 0x5422cb9c, 0x4ed2233e, 0x7a956e89, 0x9f132d5d, 0xb21ecaf1, 0xaae3eeb0, 0x5b6ca2c8, 0x4dc9ee83, 0x970b1474, 0x990ccca3, 0x223acb1d, 0x8136038e, 0xd3f3f287, 0x869f34e7, 0x11966837, 0x9952dab8, 0xdb3077d8, 0x9fe3cdd8, 0x892915cf, 0x658ae998, 0x6fa07e4a, 0xe28746ed, 0xa7821e30, 0x59d88b18, 0x1d1a7922, 0xe19a82ec, 0xd7e17b1b, 0x4d77aaee, 0x954c1f25, 0xad312a39, 0xb2fc818a, 0x29d17df3, 0x498f9862, 0xbba03635, 0xa4be43d1, 0x712e234c, 0xee3d9921, 0x87d1e33d, 0x8606d3cf, 0xc252595e, 0x6dd206de, 0xd381f5bb, 0xc0d9420f, 0x459e1991, 0x0c1892b8, 0xc79fd935, 0x416cdabe, 0xb57690a6, 0x087ea862, 0x763aff63, 0x15337abd, 0x6455d3f6, 0x524034c7, 0x7bb42336, 0x41669b3f, 0x26574372, 0xbc949e2e, 0x211af8fb, 0x7416c18e, 0x6c7c01c8, 0x20edce1e, 0xf858ab54, 0x2cde56af, 0xd4a31e32, 0xb6947234, 0xa8694d27, 0x90f90227, 0x257413b1, 0xacd234a4, 0x17e19de7, 0x71b63409, 0x8d67fb39, 0x0217d598, 0xcb548c2d, 0xce3c13da, 0x2d6c3984, 0xcc3c93db, 0x6d10ae7c, 0x893b3eb1, 0xcf58d7d6, 0x8038f673, 0xf5d60b09, 0x85c6ea1c, 0xbe121052, 0x4519c0cd, 0xe4032569, 0xc0a8e700, 0x2a72e706, 0x8534a64f, 0x9f4a1361, 0xd4ef416b, 0x43336420, 0x323e2a96, 0xd5ea02c9, 0x59782e1b, 0x4c4e02e2, 
0x75171d80, 0x6aa8fba1, 0xfdc8233c, 0x36eee3c0, 0x18643046, 0xce2d3fe4, 0x73ed0154, 0xfc8dfea4, 0xd43c9f90, 0x2bbdf9fa, 0x0dd43b40, 0x74c54101, 0xd4dd41ed, 0xf06a79a5, 0xb85687ae, 0x5f85dfbd, 0xd630cc07, 0xd7105cd7, 0x46f30505, 0x15575a70, 0x1b3dff40, 0x24a9e552, 0xcc52f6c4, 0xddca62fe, 0x094da898, 0xbbf990e8, 0xd2531586, 0x79a6abe4, 0xd67bac65, 0xaf2e08a7, 0x36c28310, 0x85f3fb0a, 0x188a8332, 0x7ed37c9c, 0x9fd8d6c6, 0xba87d2eb, 0x0331d15f, 0x11438530, 0x86848311, 0x81b555f1, 0xf582213f, 0x1b39290e, 0x644ed913, 0x1a15eef2, 0x30df2149, 0x9aee5345, 0xdaf33a78, 0x1a36f066, 0xbc6d937e, 0xf0553748, 0x3c4d51a4, 0xac14c751, 0xc691eb3a, 0xd08c9cde, 0x93a2716c, 0xd46bddb8, 0x86504249, 0xd331a584, 0xbb0a34b0, 0xe095b710, 0x637512a4, 0x1b4b520d, 0xd51e5b11, 0x561b6ea5, 0xf1d870ab, 0xed478d20, 0x63e61e6d, 0xf159f48a, 0xc5e9d72c, 0x5c5ce2e0, 0x65f1e7a6, 0x3d98331a, 0x54596f93, 0x98a1fcef, 0xd603f018, 0xdf0a0fe9, 0x3133cc21, 0x1f66fa5a, 0x34d24f08, 0x3f6ffbdc, 0x8f5a7faf, 0xb6f7f344, 0x44e8f0fc, 0xb84d7ede, 0x6a193aa5, 0xd7846b26, 0xdc93b0fe, 0x8eb61507, 0x57502a82, 0xd0068b11, 0xbfc1a056, 0x5766badc, 0xcd80e8cb, 0x3ad80ff6, 0x6b07c122, 0x2fc821a0, 0x924b7f93, 0x0cb37099, 0x8f078060, 0x74ea3ad4, 0x12006e5d, 0x513db72f, 0x9ed5dc6d, 0x0f8763e1, 0xd3746c15, 0x69043f07, 0x2f1f0e29, 0x1e134f46, 0x1b673ace, 0x352869fa, 0xb903f872, 0xf44e83a9, 0x0358ba50, 0xad94f27d, 0xe3799bc7, 0x1aa584be, 0xbf6496e8, 0x020edea1, 0x8f9f1e56, 0x335d8aad, 0xb52f8536, 0x1047d008, 0x705fc7c5, 0x82a15021, 0x38ec73c5, 0x83bbeec3, 0xafe5dfad, 0x014d9399, 0x132c1fdf, 0x90781f7f, 0x9fdf14ca, 0x4f9e619e, 0xcffb2139, 0xd868773d, 0x53ab6332, 0xc7139a87, 0x069c6fa1, 0xaea8ddc8, 0xebf10a04, 0x594a74b2, 0xaf80e4a8, 0x4ea207a5, 0x0017f466, 0x780f7b67, 0x35b7e34c, 0xd5c9b942, 0x55096faf, 0xf46db3d1, 0x4ab94cc8, 0x2b69bb09, 0xd95f0cbc, 0x97694c99, 0xd1cfe216, 0xb21e227d, 0xea436ce4, 0xbd06f5f2, 0x69b7ed73, 0xdc368c1b, 0x6bc96d16, 0x00af13db, 0x33bac433, 0xe3b28392, 0x985ce7fa, 0x49981994, 0x3acd1335, 0x8a72de1d, 
0x41500863, 0xa2eba987, 0x1928c7d1, 0xa599918a, 0x9b8ff4d9, 0x698b190b, 0xfdb34c56, 0x53fb5413, 0x4879c088, 0x3bef71a7, 0x54f32abc, 0xe3fe3908, 0xa8b79d24, 0x50f69baf, 0xe87713e1, 0xceeaa849, 0xb5054c6e, 0x99076ad6, 0x4cecff0b, 0x21880bcb, 0x78b035b6, 0x2ba89560, 0x82d35f09, 0xeb2456ed, 0xc404c4b3, 0xd88f0c49, 0xd07cb42f, 0x37c1d01d, 0xb81701af, 0x1b435610, 0x6945f3b8, 0xd1d51325, 0x1b89be74, 0x67fb8124, 0x48f5ee8e, 0x80181ba7, 0x10817a0c, 0x931890cb, 0xdd21311d, 0x8530cda9, 0x8e31ca20, 0x3bc790ba, 0xbb769f75, 0xd87d8134, 0x1e586f4e, 0x0afef375, 0x0756d640, 0x7e89338e, 0x7ffd1904, 0x83dbd0c7, 0xcfd2f15b, 0xfd2675d6, 0xab8cd735, 0x6c95ef68, 0x4e713995, 0x90835487, 0x193af1c9, 0xfe13ce8f, 0x82beba7f, 0x8a9c42b3, 0x44635fa6, 0xb5b71efc, 0x12b4e48a, 0xf7af1338, 0xd1ac39a8, 0xbacc03c1, 0xf24f7267, 0x8e0f9e90, 0x189cabb4, 0x3c8019a1, 0x4abecfee, 0x9d972bc8, 0xf5e931b0, 0xc26b7de1, 0x1d56f5d3, 0x77079560, 0x04ae0fac, 0x03220a78, 0xe0b95f42, 0x92d112ce, 0x2bd2afad, 0x80755047, 0x41a23ec9, 0xb49f602c, 0x3761a3f6, 0x935ee1cf, 0x1b64abe7, 0x006e19a9, 0x0b720985, 0xd8155ad3, 0x15539fef, 0xe2a32706, 0x86ac3c63, 0x5b606e14, 0x529e4a4e, 0x5d862f33, 0x68db23a7, 0x276819b7, 0x8c1e1a2d, 0xb8bdd15a, 0x7d0eb98e, 0xd3301dee, 0xeddd8b74, 0xb73dad26, 0xded6a141, 0x1f233c79, 0x324da954, 0x3eb44363, 0x530f5631, 0x60aa4163, 0xa0a53376, 0x46447217, 0x21c4a239, 0xf2813423, 0x72ec278c, 0x87b8bf19, 0x630ff66d, 0xd83cf3a0, 0x9f889ff6, 0xb99fc512, 0xce97a6ac, 0x018016d3, 0xb6c29965, 0xc06b4718, 0x837bdedb, 0x4cf09f8e, 0xe9dc4763, 0x7696df71, 0x03182a80, 0x4437d055, 0xea83a340, 0x749af1a9, 0xab377e67, 0x9c568a33, 0x15a7cce4, 0x7a0a58e1, 0xfa221945, 0x55bf880c, 0x31607015, 0xb3780687, 0x5cb9bd63, 0xfec69f6b, 0xd2e21926, 0x8b3dad02, 0x4f3b41bc, 0x75e0e8cc, 0x10589810, 0x5729ba38, 0xb0a588e1, 0x7c821c0a, 0xbc0075a0, 0x3ab20949, 0xaff6b176, 0xc8497f0b, 0x1871d8dc, 0x1ff4a370, 0x824bcf3b, 0x6eca58a6, 0xa009169f, 0xf610f928, 0x23a867df, 0x383522b8, 0xc5f83a93, 0x2c3cd773, 0x55dbf231, 0x7c64920b, 
0x5ef0d8fa, 0x31db2bfe, 0x45439b02, 0x230e546f, 0xc48477fc, 0x7e83db3c, 0x989f9765, 0x36d109b3, 0x22d9ae17, 0x5b19aa54, 0xec1161f1, 0x4abd5b16, 0xf3264a50, 0x0488b52e, 0x3e0fba91, 0xd69b929c, 0x083c42ee, 0x4c37fc2d, 0xada375ec, 0xf4f80d69, 0xe645063a, 0xa9aa2bf5, 0xfb3169fb, 0xc2a162ab, 0x794a6f5b, 0x68838c2c, 0xf40b2c40, 0x4ef814f1, 0x4f847bc0, 0x5333a90e, 0x9b3f17a3, 0xa9af089f, 0xe1991375, 0x3f7384b6, 0x1fb942c7, 0x97365bba, 0xd3b6523a, 0x2d76c616, 0x5d7b944c, 0x9dffc0d9, 0x6ef0ca09, 0x745d606e, 0xc25f0d1d, 0x68335b59, 0xa5a9aa32, 0x5f609d6f, 0x58012e9c, 0xc138c974, 0x0c62589a, 0x54c7026b, 0x78576d4f, 0x7ab860fe, 0x673810e2, 0x3cd73808, 0xa20364a5, 0xc750409a, 0xaff51879, 0xa4a598b0, 0xc4f113e9, 0x28da785e, 0xf41e4752, 0xf0608ba0, 0xa3434cf2, 0x5591372b, 0x4aa08a13, 0x0ec92e82, 0x714a0880, 0x643b37f1, 0x8a2bb7a2, 0x7db8db33, 0x6f800d7f, 0xce583d48, 0x07517baf, 0xdc1d9e7d, 0xab9a13cd, 0x6f2614d0, 0xbf021e62, 0xbbd113d2, 0x426f03a7, 0xcd1cd97e, 0x321886e0, 0x894b01a2, 0x6ef2e9aa, 0x4aa8b1e0, 0xc2c2ff9a, 0x37a2d9a6, 0x3e4b059b, 0x4635f62b, 0x0ea4b349, 0x4bf910ea, 0x24d44dae, 0xc60dba2b, 0x017df1de, 0x7d7e20c1, 0x6db4c345, 0xfa704510, 0xcf23dded, 0x0c6f7db9, 0x863aea75, 0x2266ac92, 0x6434e1ec, 0xbd1f2c9f, 0x3a1f0877, 0x45c04d11, 0xc8c0a4de, 0x435beef5, 0x02c271e8, 0x86b845ba, 0x641c1fee, 0x33c0762f, 0x79d746f3, 0x2071528a, 0xd96a62b5, 0x6457d349, 0xbcf4079c, 0xb97d7c3f, 0x7bd9b37c, 0x8d22cb6d, 0xd108173d, 0x0d681c0c, 0xcf551b4f, 0x2c39510d, 0x9ebc64c9, 0x1ef42072, 0x12508fce, 0xb08f1e43, 0xe189dbf9, 0x210af02d, 0xce1ada8a, 0x5843de7d, 0xc8f2eb59, 0x97ebfc1f, 0xde0b9b2a, 0xc7ca01e9, 0x0a605020, 0x9d3265f2, 0x0ad04e2b, 0x3d203fe2, 0xb1cb3883, 0x7e3ee25a, 0x02483609, 0xac38260f, 0x6d028d74, 0xdffeb968, 0x1c1d8074, 0x53be10b2, 0x184ed1cb, 0x4e0bc9e4, 0x7d600a82, 0xd5efa6f7, 0xbe35cb9d, 0xe7aab6ff, 0xd24efe88, 0x4f95ee51, 0x531a720f, 0x9281a4d1, 0x349fe52e, 0x0372e4fb, 0xc0f4e1fe, 0xd22ca046, 0x7ad0d314, 0xb1917a84, 0x65ddfdc8, 0xf9e7d7f2, 0x731c3ae6, 0x3ccf4730, 
0x97503482, 0x9a75a545, 0xf3713091, 0x7074223f, 0x5ac2d900, 0xbb5bbda3, 0xf3ac9fe5, 0xc5f432f7, 0xc06ba069, 0x59d175ee, 0xf6624a20, 0x5ad27321, 0x44ec92f7, 0x5cdcda3c, 0xe4bfbaae, 0x61a61fdb, 0xeb78d322, 0x8a225c70, 0x5af8aa8e, 0x98dfebfa, 0x5e09f08a, 0x955bc9fa, 0xaf28b29a, 0x9795588a, 0x3a880cb4, 0xacfb27cb, 0xfadfac71, 0x26f58cd2, 0x8a9a8481, 0x5c73cf8f, 0x0a764745, 0x8e7525c6, 0x5a48f4bd, 0x9a038d74, 0xbe052e79, 0x50b8ab7b, 0x847b3fd6, 0xfd7af126, 0x910b5700, 0x89f74a62, 0xeacb449c, 0xba8ab0ec, 0xfb0d55df, 0xc9000ded, 0xa0e4fdc0, 0xd7fdbcbc, 0x776ad910, 0xf8ebe464, 0xb78641a8, 0x9e19f911, 0xa39f3c7f, 0x28798682, 0x770c0c67, 0xd6618589, 0x3824cfb9, 0x284362cc, 0x6f801c7b, 0x44473f26, 0xeb97128c, 0x9f5e29f8, 0xae790aec, 0xda7ce4f2, 0x2c37baad, 0xe0733abb, 0x2f79c97c, 0x56f014f5, 0x436f9427, 0x3fa2c41e, 0x03f4016e, 0xf29583a0, 0xdaa06497, 0xaef999c4, 0xde6098ea, 0x49d744e4, 0x9ac363c4, 0x71b11295, 0x1056e853, 0xa50f1c79, 0xf58aee75, 0xd2d98a60, 0xcc086a21, 0x633306d2, 0x43516bca, 0xf24c6958, 0x75bf4c20, 0xd1aa09ce, 0xd98701cb, 0x6497d417, 0xf09334da, 0x74005f98, 0x72241183, 0x3b7be9f5, 0xf4e1cf5d, 0xb44fe087, 0x9b347ba7, 0xb662e5db, 0xaf570647, 0x1b3d5eac, 0xf2d33276, 0xf694fa88, 0xfbf7ee8c, 0x722c4ccd, 0xb164605e, 0xfa22d883, 0x716dbb44, 0x00289aec, 0xfeb07061, 0x82f6934c, 0x7b9f20ae, 0x3d2832b2, 0x2127f401, 0x7e614a69, 0xb48b17be, 0x1e9061e2, 0xcdacbff9, 0x29519f11, 0xeb31bcdb, 0xc6a9a031, 0xa1fc0693, 0xdaaacb3d, 0x00177418, 0xd3b045d4, 0x575792d3, 0x7cc871a9, 0x4107f556, 0xd5dd7111, 0xecb8a31b, 0x88f6384e, 0x3e9559d2, 0xb87eab52, 0xddc8d45c, 0xcee4cadb, 0xb9672b94, 0x786129c5, 0x8a238446, 0x5cc9ce36, 0x897b89a3, 0x248f233f, 0x93a01697, 0xa9e32583, 0x46318359, 0xb60bd733, 0xcb344995, 0x93fe4889, 0x60477ed8, 0xe0baae66, 0x6e5f97b1, 0x36c221f3, 0xdecfa578, 0x45a9e7a5, 0xee38c8e3, 0x94209438, 0x0850dc00, 0x21d4792c, 0x0bff88c7, 0xfa703b77, 0x30ce06ab, 0x77bd6475, 0x07909087, 0xc8d5076a, 0x613d19bb, 0xc50da2d5, 0x77314616, 0x10f1ca59, 0xf5ed6e4e, 0x98132237, 
0xa67d0346, 0xd17c5af7, 0xe49b092a, 0x2c6ac00a, 0xd6a18085, 0xa608ec39, 0x6f93259e, 0xfcd1c805, 0x119aa599, 0xee224e6a, 0xcd42adfc, 0x5c62cac2, 0x62739935, 0xbb2899f7, 0x3f708519, 0x6cbf5af7, 0x23dab7fb, 0x4c335483, 0x30835257, 0xcb83d782, 0x60495aba, 0x9631c6ff, 0xcdab4568, 0xa9ddb16e, 0x41f50aca, 0x12347371, 0x8d2d523a, 0xf5106c41, 0xb257c349, 0x8da2838f, 0xf1dd89f0, 0xac2752a4, 0x55ea2767, 0x825e66f9, 0x70249c8e, 0x5c98333a, 0xb3bf59c7, 0x4600b593, 0xdb32d803, 0xa009b1a3, 0x573c455f, 0x93c9610b, 0x0f076b69, 0x362b989e, 0x31d3ecfc, 0x86f3c663, 0x4b7db83c, 0x1333370e, 0xef093b3c, 0x8fbfc042, 0x1299de1d, 0xb59f15e9, 0x70a306d4, 0xcdd49b7f, 0xe0e78ac4, 0x3c327e27, 0x88f19b26, 0x0e979bc4, 0xedffe30f, 0x4f5001a8, 0xc4f15293, 0xd622c7ff, 0x51effe17, 0x50bba6a6, 0xce0d506b, 0xb7652789, 0xc41720af, 0x53e907a8, 0x616fa770, 0x5d033835, 0x1039b042, 0x61e5b922, 0xfe81924a, 0xf355025c, 0x6dc5a3f1, 0x3c8f6e63, 0x5b5fe252, 0xa8b6f433, 0xac6fbfa2, 0xf6b0e5b9, 0x20f5b0b4, 0x70f68390, 0x61ad174c, 0x2d359dd8, 0xa4ad7281, 0x0e00eee7, 0x79abb5de, 0x3405bcf7, 0x581d66b4, 0x0279f078, 0xd38722d7, 0x247fe970, 0x3c031225, 0xda242c14, 0x2a2b7ae6, 0x218cef99, 0x4005f76a, 0x1ef3e30a, 0x03b957a3, 0x5f4bc0bb, 0x682bc52e, 0xfd640d9d, 0xbddf1fc4, 0xd8e6d767, 0x69b15d06, 0x5f62cd28, 0x7119ada8, 0x1c0b776a, 0x59bd0273, 0xf6a6a315, 0x62b75968, 0x3f216017, 0x10cb81c2, 0x055266ff, 0x7e4bab81, 0x4959ee06, 0x60876e87, 0x5d6ed71a, 0x8cf8c0fd, 0x7df4d611, 0x3cc63f3f, 0x2fb7033e, 0x26154a95, 0x34057009, 0x822e5c53, 0x8867b292, 0x2d784f05, 0x3e9e7ea3, 0x760d9500, 0x780a7bc4, 0xd36732b2, 0xf36d5b98, 0x20c71e2b, 0x2bc8e9a7, 0x096be252, 0xb9b9236b, 0x8071dfd4, 0xacfc1e15, 0xcbbde7c9, 0x5f33b8f0, 0xbb63b209, 0x551eb7d6, 0x113e505f, 0xb076c142, 0x43044856, 0xaf552da0, 0xd921227b, 0x10817109, 0xdf903cff, 0x1419a111, 0x620ee003, 0xb614f2da, 0x321961d0, 0x650e8b24, 0xd5b909be, 0x79c7fbf6, 0x5849e0b0, 0x9342c0ba, 0x0fbf4e5e, 0xcd01430f, 0xd80f501a, 0xcbde9f45, 0xd1f7c565, 0x20fd41c4, 0x5fef8bb7, 0x755e7eca, 
0x611c1ad5, 0x3e40beb2, 0x9e864b82, 0x69973554, 0x1d1d7c51, 0x3541f0a5, 0xa2aef7cd, 0xafa3c0e1, 0x1140c1b7, 0x82f9c5fd, 0x0e53bda8, 0xeb48c46a, 0xe6e5f766, 0x686f4800, 0x84ff9918, 0x3a299c37, 0x7c3a324f, 0x25295743, 0xb9131e22, 0xa9467eff, 0x88bd0285, 0xfe7b56c4, 0x3774cab6, 0x8b709b30, 0xe98eb68f, 0xf99ed1ae, 0x21c8eda6, 0x8ceb5ee2, 0x24474545, 0x8709e6f9, 0x72b0e80f, 0x66d9c7f5, 0xe2ca210b, 0xdfbd7e43, 0xc9c5335c, 0x68b085ce, 0x021f3760, 0x2805cab4, 0xc2817224, 0x33054445, 0x05fc88f5, 0x4ef25d45, 0x1aed0636, 0xcaa0b185, 0xa8eb115b, 0x595bde5a, 0x7772b19b, 0x4dd2ca0d, 0xfc5c2185, 0x78c831dc, 0x4ffef944, 0x78430100, 0x03850200, 0x58c49038, 0x5aeee65f, 0x818f6b1d, 0x21ac1b4b, 0xecf90ec1, 0xfc678952, 0xe8340c7c, 0x51c1d8ca, 0xbfc33274, 0x1a83815c, 0x49512c1b, 0x4d262ce3, 0xab2674eb, 0xa84710f8, 0x96c698e5, 0xd045f84b, 0xc2ff8580, 0xf7a42aa9, 0x6eee74eb, 0xfde04cd3, 0xaa18c506, 0xcb3c95dc, 0x4761f04c, 0x01a75b02, 0x395cb0ce, 0x6269b03c, 0x7a4976a5, 0x982a10f3, 0x42cf92cd, 0x8c110454, 0x055408da, 0x0ada639f, 0xf3d74ec6, 0x3525b6b4, 0x21d1437d, 0x5ec84bc1, 0x5a8256e9, 0xe03bc870, 0xd4ec26ad, 0xff77bf3b, 0xf1af0c32, 0x8d8ed6df, 0x02fa5021, 0x7d04282b, 0x9d16455e, 0x5656ed1c, 0x86015646, 0x4e911190, 0x97108eb4, 0xb95d284c, 0x9d379b17, 0xe7ea9203, 0x57092245, 0xb821ff82, 0xefcef176, 0x4c7885d5, 0x36a3990d, 0x4949c392, 0x3a37d261, 0xe3542623, 0xb8f04501, 0x49554eaa, 0x5784625e, 0xc5fdb9d5, 0x089b3dc0, 0xf4741645, 0x59c2b88b, 0x044c5ecf, 0xb5b895f4, 0x5e643350, 0x59fd5974, 0x18af0a5e, 0x1690a41e, 0x7afd7df8, 0xce194b1d, 0x7348105f, 0x55bdac13, 0x992f1f30, 0x7576c408, 0xd5986ca9, 0xfb5db4bb, 0xdd077a3e, 0x1555fd40, 0x8f43e64c, 0x0891c0b2, 0xc034b002, 0xa178edf1, 0x73c6aa69, 0x43768cbf, 0x50518a88, 0x5cea403e, 0x66621f52, 0x6454ada9, 0xfc5353d7, 0xb6367c33, 0x36083ea3, 0x7c6370d7, 0x871ce0df, 0x996a9db6, 0xfc85b23e, 0x4832a696, 0xd9e633e3, 0x8d57a607, 0xb065c6f6, 0x6b14e612, 0x640a3223, 0x7d40f089, 0x00e856a5, 0x02b89be7, 0xbb17308d, 0xf495ca2e, 0x36d73234, 0xe6efc3f6, 
0x3e34538a, 0x50cae00d, 0xc659ceea, 0x8f9c4fd0, 0xffd2a14b, 0x8f3c0302, 0xad6bf0e2, 0x7da1cdf3, 0x587bb565, 0xf5a45c06, 0x7b58dfac, 0x8d5689a9, 0xdd8193b7, 0x0fea45f3, 0x849600fa, 0x71d1f8ee, 0xd9cd7337, 0x10ed0d73, 0xbfb107e3, 0xae79885f, 0xbc424999, 0xefa0a006, 0x66fcd210, 0xb8221c2c, 0x4e4fcb90, 0xbc19bd01, 0x13a6165e, 0xb2456d5c, 0x9de9927f, 0x5cad9384, 0xc5b8aca8, 0x709eaed3, 0x025d5327, 0x3334f98d, 0xc8775813, 0xa7e3a7b5, 0x4900d83c, 0xdb3e88aa, 0x798cf72d, 0xc5201d9c, 0xd301c8a5, 0x24aba004, 0x7fb00d06, 0x96b07431, 0x07e817d2, 0x5dc5e25c, 0xe3565527, 0x4e1527e2, 0x9b19ac07, 0xb97a792b, 0x810e2f87, 0xb7e67428, 0xbdc1ad62, 0x4d68b464, 0x86743e97, 0x5ac3a913, 0x74a330bc, 0xedbd0163, 0x17c1ca06, 0x3762d54b, 0x67cac477, 0x57fb1db4, 0xca5c3afa, 0xe9cdec2a, 0x3f517285, 0x6c123489, 0x8c9c347f, 0xdc352535, 0xfd72d8b0, 0x615adae6, 0xd2149734, 0xb4d087d2, 0x50777523, 0x152794d7, 0xa1de388e, 0xacbfb4a7, 0xcc792a3f, 0x0c9beff1, 0x4518932e, 0xbb10562a, 0x0231bbab, 0x34d6b5e4, 0xe56b9d51, 0x1179cf3a, 0x7c9cca04, 0x7e6804dd, 0x43749e37, 0x474abbed, 0xeca9da66, 0xdbc9f38a, 0xbd90f4f2, 0xca3e3208, 0xf6804e7b, 0x6c2f368b, 0x71865b53, 0x477e86cf, 0xf9556567, 0x99f1268d, 0x5b0d3742, 0x5c7759e9, 0x329a93f4, 0x6edb40ef, 0xd8238cf4, 0x14c6653d, 0xda60ef87, 0x845f263d, 0x5ecb0b8a, 0xc0318caa, 0xb6e9c384, 0xb375f88e, 0x8bbc46cc, 0x9cf69f27, 0x5ad28fa8, 0x458c2af7, 0xc848d9c1, 0xa8fc2598, 0xef9d6e05, 0x72e2b656, 0x548e9bd8, 0x57c6446f, 0x1ed6fa9f, 0xee2aabba, 0xa0ad6aeb, 0x9cf39373, 0xe1ef4e1e, 0x84aaf630, 0x51839901, 0x1fd11926, 0x7e8d187e, 0x3f0c9a19, 0xd15b2dd2, 0x87bcba56, 0x4e4abd25, 0xfe14d638, 0x632c77f2, 0xdd2fb790, 0xa7172b76, 0x51b3b07e, 0xbc087c39, 0x08e4bfcd, 0x835236c7, 0x2f55bb11, 0x01de9ed4, 0x3dfc98ce, 0x28776e03, 0x98fb6143, 0x3b05e401, 0x0999f936, 0x673cda1e, 0x9075e503, 0x80dbd8a1, 0x1d113be7, 0x368624b0, 0xd06b7118, 0xdc0378f6, 0x0c6aee59, 0x3c77c121, 0xb75108d5, 0xa1d6c1a7, 0xdacd595b, 0x71d9b0e5, 0x4afe1c59, 0x4a2cd1e8, 0x8c3eda88, 0x97ae95c2, 0x6bfacf26, 
0x47182448, 0xdbd882ef, 0xae660019, 0xb0986ca0, 0xe8daf1be, 0xb18b34dd, 0x6ba59afa, 0x560c57ea, 0xf6be34ba, 0xcd89f600, 0x6403fcfe, 0x5e429947, 0x7d4886e1, 0x8018e754, 0xc17251e7, 0x36eab7c6, 0x9067ad78, 0xe95f1fe6, 0x44961501, 0x8619621c, 0xdb13e77d, 0xd5d6124d, 0xd36f8ac0, 0x9395356c, 0xf713d1e1, 0x2b3c9d15, 0x4dfb98f9, 0xb553ff7f, 0xd1675da5, 0x382dcfdb, 0x659ed198, 0xa0bfe7d7, 0x0c1fe7f7, 0x6ae7194d, 0x1966c9fe, 0x369f1f79, 0x1137c2c2, 0xf7a06345, 0xfe3f544b, 0xac35a2f4, 0xd7aea537, 0x9b37ff3b, 0xfc395a7b, 0xf3c2710a, 0xf7ec7804, 0x5f5820ca, 0x72b2a99c, 0x6162f1ca, 0x9f4288a2, 0xa888ed1e, 0x02208839, 0xea56c569, 0xace682bc, 0x95096878, 0xf33986ce, 0xbc3ab34e, 0x852fb06b, 0x4d809b7b, 0x3475e9c8, 0xe947baae, 0x18535080, 0x205c85fa, 0x5792f851, 0xe029ec48, 0xd4403f27, 0x587471da, 0x3bd97278, 0x91f1a328, 0x65ea5d8a, 0xc0cfbf0f, 0x135abf90, 0x62843a32, 0xdf6a7aa1, 0x79dc7616, 0xd091c454, 0x355e2d4a, 0xa54e04a6, 0x25719823, 0x41bfa322, 0x94ec342e, 0xbcd06558, 0x52775497, 0xdcd0c726, 0x8c8f3975, 0xbc31513b, 0xb9b3acec, 0x33bbeff5, 0xa3432872, 0xd8dd265e, 0xc1d9f64e, 0xe016c95d, 0x840b9c5e, 0xecdf0d3b, 0x9335eac7, 0xe319c342, 0xaf8a83ca, 0xc2f11b65, 0x8d40c919, 0x3b91cd82, 0x70aad694, 0x341ff4ee, 0xb3fdc758, 0x86e8e96c, 0x239e0308, 0x7daaf786, 0x6d9c3e2e, 0x028237e0, 0x652b0a79, 0x0f203491, 0xccb40e73, 0xb1954681, 0x7ab03780, 0x9cd9ef50, 0x43e720b7, 0xe7de746d, 0xade36a14, 0xbb4f7503, 0x21ef55aa, 0x45a0e8e3, 0x1156ea33, 0x1091d26b, 0xc8b9a8d0, 0x722df639, 0x92977f76, 0x8cb11fe6, 0x0aaeedc7, 0xb1096093, 0xe45ea74f, 0xc2b54ff5, 0x190e549f, 0x222192cb, 0x695f9d7e, 0x926d466a, 0x0dc294aa, 0x7e16a1b5, 0xa4bd2267, 0x19ee878a, 0xc5b8c83a, 0x001b6546, 0x6fbf7edf, 0xb3708024, 0x8e98402d, 0x86af015f, 0x38dfd5b8, 0xc50808ef, 0x29a71185, 0x85f233d5, 0x9dea5939, 0xce3cf02c, 0x45d68b76, 0x745a85b0, 0x6fe38075, 0x52e01b99, 0xc4693697, 0x2cd0bf67, 0x3edecb8f, 0xeb76f624, 0xe3eb94f4, 0xf587d9e5, 0x813543ea, 0x93dfaced, 0x018c23ad, 0xb6781fd7, 0xbd564fc3, 0x962da3f0, 0x389ab6b1, 
0x5866fd23, 0x1f8b3979, 0xc10386bc, 0x4ef64f11, 0xb4572417, 0xa2f65667, 0xd68b6523, 0xa5512c00, 0x2d5f663e, 0x3bf700ff, 0x09ef3396, 0x451bcb0d, 0xfee39ccd, 0xf606c443, 0xb46ffa45, 0x85bcfa16, 0x7fc6efa9, 0x701ec2fe, 0xde98a301, 0xd9980668, 0xc5d004ae, 0xd03dbbb3, 0xb1d795c2, 0x2ab6203e, 0xfa237b58, 0xbf425321, 0x000e019a, 0x2547dbbf, 0xfb97b8ed, 0x08b09edd, 0x42cd7b68, 0x32198a7f, 0x87a2a72d, 0xe0a6a1aa, 0x2866775c, 0xc66e7ac5, 0x9edc41d3, 0x983a6ec5, 0xd3e9793a, 0xa53ad299, 0x38d774ce, 0x75ce7fbd, 0xb353f86e, 0xc37bc25e, 0x5f2b5532, 0x1975cde4, 0xc8294de5, 0x47fbc7c1, 0xb76b2789, 0xea88c920, 0x6c2145ba, 0x4c2211f2, 0x16a0e6de, 0xa2915469, 0xea2b14e2, 0x35202f43, 0xaa9c69de, 0xbd00bd0e, 0xbea9e3f0, 0xbfb0bb47, 0x74347ce4, 0x1bc15e8f, 0xd9f70c10, 0x968f1e31, 0xc55fa605, 0x47af7015, 0xe772ade0, 0x28a5c782, 0x0955a18f, 0x1054e43d, 0x5d7702b1, 0xd54a0e22, 0x82f0f8be, 0x787b2bce, 0x243ce7b8, 0xaa160942, 0x5e7477e9, 0x45cd0df6, 0x5bdca3d7, 0x6b992304, 0x4a8d3515, 0xfe356a57, 0xb011fc33, 0x0e2624c6, 0x0ba4ae36, 0x029fecca, 0x01ac36cc, 0x661010ac, 0x7a8a378a, 0xf499b342, 0x973256e2, 0x1229865d, 0x03411f03, 0x7d925789, 0x6399fa28, 0x1859ddb8, 0x83becb6d, 0x3b8322f5, 0xd0d5f97a, 0xc0fcca51, 0x6be07c35, 0x6072bdea, 0xc09b95e6, 0xa40826f8, 0x1fb79832, 0xc401e83d, 0xcff57a0c, 0x834ee1c2, 0x59ab9f78, 0xad6e094a, 0xad2218bb, 0x3bbf864c, 0xbb62b997, 0x23eed3ff, 0xd033de59, 0x031f0ec4, 0xbcfabf99, 0xac63ae10, 0xe4dc4aec, 0x5b98d623, 0x0dad0246, 0xb5884cbb, 0xa39db5c7, 0x3c243f66, 0x5e69dbfb, 0x3384971d, 0x9145a99e, 0x87570b15, 0xd6821205, 0x34fa05cf, 0xff4ac046, 0x8a98f678, 0x72add320, 0x910a51a5, 0xe78b8b93, 0xb28cd243, 0x6a752e76, 0x16b4e6a9, 0x60b5e403, 0xc5c51f70, 0xbee86c57, 0x75a122c3, 0x3b7e773d, 0xfd8ab8ec, 0x72839672, 0x6a713aa4, 0x18fd1c1d, 0x2ae1e7db, 0xa77453d7, 0x01e7e6c4, 0x31b08d49, 0x636c7119, 0x736028e8, 0x75a31941, 0xcf080b2c, 0x4a92a8fb, 0x84f6b87a, 0x4f97e0dc, 0x8a7b11d2, 0x1ce7f369, 0x056f3a69, 0x40393f83, 0xffc98a61, 0x80daf387, 0xc6a757b1, 0xa95790e2, 
0x1c76cf02, 0xa1450bba, 0x3a3150e5, 0x378e9844, 0x7c47420d, 0x617d2066,
	0x8cbd025e, 0x252260a0, 0xd7ded568, 0x8e5400d7
};

/* Reference implementation of the ClearQuest "CQWeb" password hash:
 * sums table entries indexed by (position + character) for the username,
 * then by (position + character + username length) for the password,
 * and finally adds one more entry indexed by the combined length.
 * Note: char values may be signed for 8-bit input; the & 0x7ff mask keeps
 * the index within the 2048-entry table either way. */
unsigned int AdEncryptPassword(const char* username, const char* password)
{
	unsigned int userlength;
	unsigned int passlength;
	unsigned int a = 0;
	int i;

	for (i = 0; username[i] != 0; i++) {
		a += AdRandomNumbers[(i + username[i]) & 0x7ff];
	}
	userlength = i;

	for (i = 0; password[i] != 0; i++) {
		a += AdRandomNumbers[(i + password[i] + userlength) & 0x7ff];
	}
	passlength = i;

	return AdRandomNumbers[(userlength + passlength) & 0x7ff] + a;
}

/* Allocate per-candidate key/hash buffers; under OpenMP the key counts are
 * scaled by thread count * OMP_SCALE to keep all threads busy. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_align(sizeof(*saved_key),
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	crypt_key = mem_calloc_align(sizeof(*crypt_key),
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}

/* Release buffers allocated in init() (freed in reverse order). */
static void done(void)
{
	MEM_FREE(crypt_key);
	MEM_FREE(saved_key);
}

/* Validate a "$cq$<user>$<hex32>" ciphertext: checks the tag, that the final
 * '$'-separated field is exactly BINARY_SIZE*2 hex digits, and that the salt
 * (username) part is non-empty and fits in SALT_SIZE. Works on a strdup'd
 * copy so the input string is never modified. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *q, *tmpstr;
	int extra;

	if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		return 0;
	tmpstr = strdup(ciphertext);
	q = p = &tmpstr[TAG_LENGTH];
	/* terminate the tag portion (overwrites the '$' just before the salt)
	 * so strrchr below only scans the salt+hash part */
	p[-1] = 0;
	p = strrchr(p, '$');
	if (!p)
		goto Err;
	p += 1;
	if (hexlenl(p, &extra) != BINARY_SIZE * 2 || extra)
		goto Err;
	if ((p - q) >= SALT_SIZE || p <= q)
		goto Err;
	MEM_FREE(tmpstr);
	return 1;
Err:;
	MEM_FREE(tmpstr);
	return 0;
}

/* Extract the salt (the username field between the tag and the last '$')
 * into a zero-padded static buffer. Returned pointer is to static storage,
 * as the JtR salt interface expects. */
static void *get_salt(char *ciphertext)
{
	static char salt[SALT_SIZE + 1];
	char *p, *q;

	memset(salt, 0, SALT_SIZE);
	p = ciphertext + TAG_LENGTH;
	q = strrchr(p, '$');
	memcpy(salt, p, q - p);

	return salt;
}

/* Parse the trailing 8-hex-digit field into a 32-bit binary value. */
static void* get_binary(char *ciphertext)
{
	char *p;
	unsigned int* out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);

	p = strrchr(ciphertext, '$') + 1;
	*out = (unsigned int)strtoul(p, NULL, 16);

	return out;
}

/* djb2-style hash (hash*33 ^ c) over the salt bytes, stopping at the first
 * NUL, reduced to the salt hash table size. */
static int salt_hash(void *salt)
{
	unsigned char *s = salt;
	unsigned int hash = 5381;
	unsigned int len = SALT_SIZE;

	while (len-- && *s)
		hash = ((hash << 5) + hash) ^ *s++;

	return hash & (SALT_HASH_SIZE - 1);
}

/* Copy the active salt (username) into the static buffer used by crypt_all. */
static void set_salt(void *salt)
{
	memcpy(saved_salt, salt, SALT_SIZE);
}

/* Store candidate password at the given index.
 * NOTE(review): strncpy does not NUL-terminate when strlen(key) >=
 * sizeof(saved_key[0]); JtR core presumably caps keys at PLAINTEXT_LENGTH,
 * but confirm — strnzcpy would be the defensive choice. */
static void cq_set_key(char *key, int index)
{
	strncpy(saved_key[index], key, sizeof(saved_key[0]));
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Hash all pending candidates: the stored salt is the username argument and
 * each saved key is the password argument of AdEncryptPassword. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (index = 0; index < count; index++)
#endif
	{
		*crypt_key[index] = AdEncryptPassword(saved_salt, saved_key[index]);
	}
	return count;
}

/* Linear scan: does any computed hash match the target binary? */
static int cmp_all(void *binary, int count)
{
	int i = 0;

#if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1
	for (i = 0; i < count; ++i)
#endif
	{
		if ((*(unsigned int*)binary) == *(unsigned int*)crypt_key[i])
			return 1;
	}
	return 0;
}

static int cmp_one(void *binary, int index)
{
	if ((*(unsigned int*) binary) == *(unsigned int*) crypt_key[index])
		return 1;
	return 0;
}

/* Full binary already compared in cmp_one (and the format is FMT_NOT_EXACT),
 * so nothing further to check here. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Partial-hash accessors used by JtR's hash tables. */
static int get_hash_0(int index) { return crypt_key[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_key[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_key[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_key[index][0] & PH_MASK_6; }

/* Format registration: parameter block followed by the method table.
 * Slot order is fixed by struct fmt_main — do not reorder. */
struct fmt_main fmt_cq = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD | FMT_NOT_EXACT,
		{ NULL },
		{ FORMAT_TAG },
		cq_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		cq_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif
GB_binop__bset_int32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bset_int32)
// A.*B function (eWiseMult):       GB (_AemultB_08__bset_int32)
// A.*B function (eWiseMult):       GB (_AemultB_02__bset_int32)
// A.*B function (eWiseMult):       GB (_AemultB_04__bset_int32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bset_int32)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__bset_int32)
// C+=b function (dense accum):     GB (_Cdense_accumb__bset_int32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bset_int32)
// C=scalar+B                       GB (_bind1st__bset_int32)
// C=scalar+B'                      GB (_bind1st_tran__bset_int32)
// C=A+scalar                       GB (_bind2nd__bset_int32)
// C=A'+scalar                      GB (_bind2nd_tran__bset_int32)

// C type:   int32_t
// A type:   int32_t
// A pattern? 0
// B type:   int32_t
// B pattern? 0

// BinaryOp: cij = GB_BITSET (aij, bij, int32_t, 32)

#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_BITSET (x, y, int32_t, 32) ;

// true if the binop must be flipped
// (GB_BITSET (x,y,...) is not commutative: argument order matters, so a
// flipped variant must be generated explicitly; see GB (_AemultB_02) below)
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BSET || GxB_NO_INT32 || GxB_NO_BSET_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__bset_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bset_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bset_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE: unreachable — the return inside the block above always exits
    // first (quirk of the code generator; kept as generated).
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bset_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read when is_eWiseUnion is true (GxB_eWiseUnion)
    int32_t alpha_scalar ;
    int32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bset_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bset_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bset_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bset_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bset_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb (GBB is true if present)
        if (!GBB (Bb, p)) continue ;
        int32_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_BITSET (x, bij, int32_t, 32) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bset_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab (GBB is true if present)
        if (!GBB (Ab, p)) continue ;
        int32_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_BITSET (aij, y, int32_t, 32) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_BITSET (x, aij, int32_t, 32) ; \
}

GrB_Info GB (_bind1st_tran__bset_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore GB_ATYPE for any code that follows this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_BITSET (aij, y, int32_t, 32) ; \
}

GrB_Info GB (_bind2nd_tran__bset_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
target-data.c
// RUN: %libomptarget-compile-generic -fopenmp-extensions
// RUN: %libomptarget-run-generic | %fcheck-generic -strict-whitespace

#include <omp.h>
#include <stdio.h>

// Print whether each of the three variables is currently mapped to the
// default device (via omp_target_is_present).  The FileCheck directives
// below pin the expected presence at every step.
#define CHECK_PRESENCE(Var1, Var2, Var3) \
  printf(" presence of %s, %s, %s: %d, %d, %d\n", \
         #Var1, #Var2, #Var3, \
         omp_target_is_present(&Var1, omp_get_default_device()), \
         omp_target_is_present(&Var2, omp_get_default_device()), \
         omp_target_is_present(&Var3, omp_get_default_device()))

// NOTE(review): "dyn" and "hold" in the section banners appear to refer to
// the dynamic reference count (enter/exit data, plain target data) and the
// ompx_hold reference count (map(ompx_hold, ...)) — confirm against the
// ompx_hold extension documentation.
int main() {
  int m, r, d;

  // CHECK: presence of m, r, d: 0, 0, 0
  CHECK_PRESENCE(m, r, d);

  // -----------------------------------------------------------------------
  // CHECK-NEXT: check:{{.*}}
  printf("check: dyn>0, hold=0, dec/reset dyn=0\n");

  // CHECK-NEXT: structured{{.*}}
  printf(" structured dec of dyn\n");
  #pragma omp target data map(tofrom: m) map(alloc: r, d)
  {
    // CHECK-NEXT: presence of m, r, d: 1, 1, 1
    CHECK_PRESENCE(m, r, d);
    #pragma omp target data map(tofrom: m) map(alloc: r, d)
    {
      // CHECK-NEXT: presence of m, r, d: 1, 1, 1
      CHECK_PRESENCE(m, r, d);
    }
    // CHECK-NEXT: presence of m, r, d: 1, 1, 1
    CHECK_PRESENCE(m, r, d);
  }
  // CHECK-NEXT: presence of m, r, d: 0, 0, 0
  CHECK_PRESENCE(m, r, d);

  // CHECK-NEXT: dynamic{{.*}}
  printf(" dynamic dec/reset of dyn\n");
  #pragma omp target data map(tofrom: m) map(alloc: r, d)
  {
    // CHECK-NEXT: presence of m, r, d: 1, 1, 1
    CHECK_PRESENCE(m, r, d);
    #pragma omp target data map(tofrom: m) map(alloc: r, d)
    {
      // CHECK-NEXT: presence of m, r, d: 1, 1, 1
      CHECK_PRESENCE(m, r, d);
      #pragma omp target exit data map(from: m) map(release: r)
      // CHECK-NEXT: presence of m, r, d: 1, 1, 1
      CHECK_PRESENCE(m, r, d);
      #pragma omp target exit data map(from: m) map(release: r) map(delete: d)
      // CHECK-NEXT: presence of m, r, d: 0, 0, 0
      CHECK_PRESENCE(m, r, d);
    }
    // CHECK-NEXT: presence of m, r, d: 0, 0, 0
    CHECK_PRESENCE(m, r, d);
    #pragma omp target exit data map(from: m) map(release: r) map(delete: d)
    // CHECK-NEXT: presence of m, r, d: 0, 0, 0
    CHECK_PRESENCE(m, r, d);
  }
  // CHECK-NEXT: presence of m, r, d: 0, 0, 0
  CHECK_PRESENCE(m, r, d);

  // -----------------------------------------------------------------------
  // CHECK: check:{{.*}}
  printf("check: dyn=0, hold>0, dec/reset dyn=0, dec hold=0\n");
  // Structured dec of dyn would require dyn>0.

  // CHECK-NEXT: dynamic{{.*}}
  printf(" dynamic dec/reset of dyn\n");
  #pragma omp target data map(ompx_hold, tofrom: m) map(ompx_hold, alloc: r, d)
  {
    // CHECK-NEXT: presence of m, r, d: 1, 1, 1
    CHECK_PRESENCE(m, r, d);
    #pragma omp target data map(ompx_hold, tofrom: m) \
                            map(ompx_hold, alloc: r, d)
    {
      // CHECK-NEXT: presence of m, r, d: 1, 1, 1
      CHECK_PRESENCE(m, r, d);
      #pragma omp target exit data map(from: m) map(release: r)
      // CHECK-NEXT: presence of m, r, d: 1, 1, 1
      CHECK_PRESENCE(m, r, d);
      #pragma omp target exit data map(from: m) map(release: r) map(delete: d)
      // CHECK-NEXT: presence of m, r, d: 1, 1, 1
      CHECK_PRESENCE(m, r, d);
    }
    // CHECK-NEXT: presence of m, r, d: 1, 1, 1
    CHECK_PRESENCE(m, r, d);
    #pragma omp target exit data map(from: m) map(release: r) map(delete: d)
    // CHECK-NEXT: presence of m, r, d: 1, 1, 1
    CHECK_PRESENCE(m, r, d);
  }
  // CHECK-NEXT: presence of m, r, d: 0, 0, 0
  CHECK_PRESENCE(m, r, d);

  // -----------------------------------------------------------------------
  // CHECK: check:{{.*}}
  printf("check: dyn>0, hold>0, dec/reset dyn=0, dec hold=0\n");

  // CHECK-NEXT: structured{{.*}}
  printf(" structured dec of dyn\n");
  #pragma omp target data map(ompx_hold, tofrom: m) map(ompx_hold, alloc: r, d)
  {
    // CHECK-NEXT: presence of m, r, d: 1, 1, 1
    CHECK_PRESENCE(m, r, d);
    #pragma omp target data map(ompx_hold, tofrom: m) \
                            map(ompx_hold, alloc: r, d)
    {
      // CHECK-NEXT: presence of m, r, d: 1, 1, 1
      CHECK_PRESENCE(m, r, d);
      #pragma omp target data map(tofrom: m) map(alloc: r, d)
      {
        // CHECK-NEXT: presence of m, r, d: 1, 1, 1
        CHECK_PRESENCE(m, r, d);
        #pragma omp target data map(tofrom: m) map(alloc: r, d)
        {
          // CHECK-NEXT: presence of m, r, d: 1, 1, 1
          CHECK_PRESENCE(m, r, d);
        }
        // CHECK-NEXT: presence of m, r, d: 1, 1, 1
        CHECK_PRESENCE(m, r, d);
      }
      // CHECK-NEXT: presence of m, r, d: 1, 1, 1
      CHECK_PRESENCE(m, r, d);
    }
    // CHECK-NEXT: presence of m, r, d: 1, 1, 1
    CHECK_PRESENCE(m, r, d);
  }
  // CHECK-NEXT: presence of m, r, d: 0, 0, 0
  CHECK_PRESENCE(m, r, d);

  // CHECK-NEXT: dynamic{{.*}}
  printf(" dynamic dec/reset of dyn\n");
  #pragma omp target enter data map(to: m) map(alloc: r, d)
  // CHECK-NEXT: presence of m, r, d: 1, 1, 1
  CHECK_PRESENCE(m, r, d);
  #pragma omp target enter data map(to: m) map(alloc: r, d)
  // CHECK-NEXT: presence of m, r, d: 1, 1, 1
  CHECK_PRESENCE(m, r, d);
  #pragma omp target data map(ompx_hold, tofrom: m) map(ompx_hold, alloc: r, d)
  {
    // CHECK-NEXT: presence of m, r, d: 1, 1, 1
    CHECK_PRESENCE(m, r, d);
    #pragma omp target data map(ompx_hold, tofrom: m) \
                            map(ompx_hold, alloc: r, d)
    {
      // CHECK-NEXT: presence of m, r, d: 1, 1, 1
      CHECK_PRESENCE(m, r, d);
      #pragma omp target exit data map(from: m) map(release: r)
      // CHECK-NEXT: presence of m, r, d: 1, 1, 1
      CHECK_PRESENCE(m, r, d);
      #pragma omp target exit data map(from: m) map(release: r) map(delete: d)
      // CHECK-NEXT: presence of m, r, d: 1, 1, 1
      CHECK_PRESENCE(m, r, d);
    }
    // CHECK-NEXT: presence of m, r, d: 1, 1, 1
    CHECK_PRESENCE(m, r, d);
    #pragma omp target exit data map(from: m) map(release: r) map(delete: d)
    // CHECK-NEXT: presence of m, r, d: 1, 1, 1
    CHECK_PRESENCE(m, r, d);
  }
  // CHECK-NEXT: presence of m, r, d: 0, 0, 0
  CHECK_PRESENCE(m, r, d);

  // -----------------------------------------------------------------------
  // CHECK: check:{{.*}}
  printf("check: dyn>0, hold>0, dec hold=0, dec/reset dyn=0\n");

  // CHECK-NEXT: structured{{.*}}
  printf(" structured dec of dyn\n");
  #pragma omp target data map(tofrom: m) map(alloc: r, d)
  {
    // CHECK-NEXT: presence of m, r, d: 1, 1, 1
    CHECK_PRESENCE(m, r, d);
    #pragma omp target data map(tofrom: m) map(alloc: r, d)
    {
      // CHECK-NEXT: presence of m, r, d: 1, 1, 1
      CHECK_PRESENCE(m, r, d);
      #pragma omp target data map(ompx_hold, tofrom: m) \
                              map(ompx_hold, alloc: r, d)
      {
        // CHECK-NEXT: presence of m, r, d: 1, 1, 1
        CHECK_PRESENCE(m, r, d);
        #pragma omp target data map(ompx_hold, tofrom: m) \
                                map(ompx_hold, alloc: r, d)
        {
          // CHECK-NEXT: presence of m, r, d: 1, 1, 1
          CHECK_PRESENCE(m, r, d);
        }
        // CHECK-NEXT: presence of m, r, d: 1, 1, 1
        CHECK_PRESENCE(m, r, d);
      }
      // CHECK-NEXT: presence of m, r, d: 1, 1, 1
      CHECK_PRESENCE(m, r, d);
    }
    // CHECK-NEXT: presence of m, r, d: 1, 1, 1
    CHECK_PRESENCE(m, r, d);
  }
  // CHECK-NEXT: presence of m, r, d: 0, 0, 0
  CHECK_PRESENCE(m, r, d);

  // CHECK-NEXT: dynamic{{.*}}
  printf(" dynamic dec/reset of dyn\n");
  #pragma omp target enter data map(to: m) map(alloc: r, d)
  // CHECK-NEXT: presence of m, r, d: 1, 1, 1
  CHECK_PRESENCE(m, r, d);
  #pragma omp target enter data map(to: m) map(alloc: r, d)
  // CHECK-NEXT: presence of m, r, d: 1, 1, 1
  CHECK_PRESENCE(m, r, d);
  #pragma omp target data map(ompx_hold, tofrom: m) map(ompx_hold, alloc: r, d)
  {
    // CHECK-NEXT: presence of m, r, d: 1, 1, 1
    CHECK_PRESENCE(m, r, d);
    #pragma omp target data map(ompx_hold, tofrom: m) \
                            map(ompx_hold, alloc: r, d)
    {
      // CHECK-NEXT: presence of m, r, d: 1, 1, 1
      CHECK_PRESENCE(m, r, d);
    }
    // CHECK-NEXT: presence of m, r, d: 1, 1, 1
    CHECK_PRESENCE(m, r, d);
  }
  // CHECK-NEXT: presence of m, r, d: 1, 1, 1
  CHECK_PRESENCE(m, r, d);
  #pragma omp target exit data map(from: m) map(release: r)
  // CHECK-NEXT: presence of m, r, d: 1, 1, 1
  CHECK_PRESENCE(m, r, d);
  #pragma omp target exit data map(from: m) map(release: r) map(delete: d)
  // CHECK-NEXT: presence of m, r, d: 0, 0, 0
  CHECK_PRESENCE(m, r, d);

  return 0;
}
GB_binop__isne_int64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__isne_int64
// A.*B function (eWiseMult):       GB_AemultB__isne_int64
// A*D function (colscale):         GB_AxD__isne_int64
// D*A function (rowscale):         GB_DxB__isne_int64
// C+=B function (dense accum):     GB_Cdense_accumB__isne_int64
// C+=b function (dense accum):     GB_Cdense_accumb__isne_int64
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__isne_int64
// C=scalar+B                       GB_bind1st__isne_int64
// C=scalar+B'                      GB_bind1st_tran__isne_int64
// C=A+scalar                       GB_bind2nd__isne_int64
// C=A'+scalar                      GB_bind2nd_tran__isne_int64

// C type:   int64_t
// A type:   int64_t
// B,b type: int64_t

// BinaryOp: cij = (aij != bij)
// (ISNE: "is not equal", result 0 or 1 stored as int64_t, unlike NE whose
// result type is boolean)

#define GB_ATYPE \
    int64_t

#define GB_BTYPE \
    int64_t

#define GB_CTYPE \
    int64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    int64_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = (x != y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISNE || GxB_NO_INT64 || GxB_NO_ISNE_INT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__isne_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__isne_int64
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__isne_int64
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE: unreachable — the return inside the block above always exits
    // first (quirk of the code generator; kept as generated).
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__isne_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    bool A_is_pattern,
    const GrB_Matrix D,
    bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__isne_int64
(
    GrB_Matrix C,
    const GrB_Matrix D,
    bool D_is_pattern,
    const GrB_Matrix B,
    bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// free the ek_slice workspaces for M, A, and B (safe if never allocated)
#undef  GB_FREE_ALL
#define GB_FREE_ALL \
{ \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}

GrB_Info GB_AaddB__isne_int64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__isne_int64
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__isne_int64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Bb (GBB is true if present)
        if (!GBB (Bb, p)) continue ;
        int64_t bij = Bx [p] ;
        Cx [p] = (x != bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__isne_int64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab (GBB is true if present)
        if (!GBB (Ab, p)) continue ;
        int64_t aij = Ax [p] ;
        Cx [p] = (aij != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int64_t aij = Ax [pA] ; \
    Cx [pC] = (x != aij) ; \
}

GrB_Info GB_bind1st_tran__isne_int64
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore GB_ATYPE for any code that follows this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    int64_t aij = Ax [pA] ; \
    Cx [pC] = (aij != y) ; \
}

GrB_Info GB_bind2nd_tran__isne_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
residualbased_elimination_builder_and_solver_with_constraints.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Vicente Mataix Ferrandiz // // #if !defined(KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_WITH_CONSTRAINTS) #define KRATOS_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_WITH_CONSTRAINTS /* System includes */ #include <unordered_set> #include <unordered_map> /* External includes */ /* Project includes */ #include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver.h" #include "utilities/sparse_matrix_multiplication_utility.h" #include "utilities/constraint_utilities.h" #include "input_output/logger.h" #include "utilities/builtin_timer.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class ResidualBasedEliminationBuilderAndSolverWithConstraints * @ingroup KratosCore * @brief Current class provides an implementation for standard builder and solving operations. * @details The RHS is constituted by the unbalanced loads (residual) * Degrees of freedom are reordered putting the restrained degrees of freedom at * the end of the system ordered in reverse order with respect to the DofSet. * Imposition of the dirichlet conditions is naturally dealt with as the residual already contains * this information. * Calculation of the reactions involves a cost very similiar to the calculation of the total residual * The system is build in the following manner. A T matrix is assembled and constant vector g is assembled too. The T matrix contains the relations of all the dofs of the system, even the nodes with no master/slave relation. 
Then the size is n_total x n_red
 * The relation u = T u_red
 * Then:
 *     A_red = T^t A T
 *     b_red = T^t (b - A g)
 * @todo There is a more efficient way to assemble the system, but more costly, which is the following. In this case T will be only a relation matrix between master and slave dofs. Then n_slave x n_master: us = T um + g
 * Separating into independent dofs, master and slave dofs:
 *     u = uu
 *         um
 *         us
 *     A = Auu Aum Aus
 *         Amu Amm Ams
 *         Asu Asm Ass
 *     b = bu
 *         bm
 *         bs
 * Finally:
 *     A_red = Auu           Aum + Aus T
 *             Amu + T^t Asu Amm + T^t Ams^t + Ams T + T^t Ass T
 *     b_red = bu - Aus g
 *             bm - Ams g
 *
 * This system requires extra care, is more complicated, and requires computing the blocks properly
 * @author Vicente Mataix Ferrandiz
 */
template <class TSparseSpace,
          class TDenseSpace,
          class TLinearSolver
          >
class ResidualBasedEliminationBuilderAndSolverWithConstraints
    : public ResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of ResidualBasedEliminationBuilderAndSolverWithConstraints
    KRATOS_CLASS_POINTER_DEFINITION(ResidualBasedEliminationBuilderAndSolverWithConstraints);

    /// Definition of the base builder-and-solver class
    typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BuilderAndSolverBaseType;

    /// Definition of the base class
    typedef ResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;

    /// The definition of the current class
    typedef ResidualBasedEliminationBuilderAndSolverWithConstraints<TSparseSpace, TDenseSpace, TLinearSolver> ClassType;

    // The size_t types
    typedef std::size_t SizeType;
    typedef std::size_t IndexType;

    /// Definition of the classes from the base class
    typedef typename BaseType::TSchemeType TSchemeType;
    typedef typename BaseType::TDataType TDataType;
    typedef typename BaseType::DofsArrayType DofsArrayType;
    typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
    typedef typename BaseType::TSystemVectorType TSystemVectorType;
    typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
    typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
    typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
    typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
    typedef typename BaseType::NodeType NodeType;
    typedef typename BaseType::NodesArrayType NodesArrayType;
    typedef typename BaseType::ElementsArrayType ElementsArrayType;
    typedef typename BaseType::ConditionsArrayType ConditionsArrayType;

    /// Additional definitions
    typedef PointerVectorSet<Element, IndexedObject> ElementsContainerType;
    typedef Element::EquationIdVectorType EquationIdVectorType;
    typedef Element::DofsVectorType DofsVectorType;
    typedef boost::numeric::ublas::compressed_matrix<double> CompressedMatrixType;

    /// DoF types definition
    typedef typename NodeType::DofType DofType;
    typedef typename DofType::Pointer DofPointerType;

    /// Set definition
    typedef std::unordered_set<IndexType> IndexSetType;

    /// Map definition
    typedef std::unordered_map<IndexType, IndexType> IndexMapType;

    /// MPC definitions
    typedef MasterSlaveConstraint MasterSlaveConstraintType;
    typedef typename MasterSlaveConstraint::Pointer MasterSlaveConstraintPointerType;
    typedef std::vector<IndexType> VectorIndexType;
    typedef Vector VectorType;

    ///@}
    ///@name Enum's
    ///@{

    ///@}
    ///@name Life Cycle
    ///@{

    /**
     * @brief Default constructor
     */
    explicit ResidualBasedEliminationBuilderAndSolverWithConstraints() : BaseType()
    {
    }

    /**
     * @brief Default constructor.
(with parameters)
     */
    explicit ResidualBasedEliminationBuilderAndSolverWithConstraints(
        typename TLinearSolver::Pointer pNewLinearSystemSolver,
        Parameters ThisParameters
        ) : BaseType(pNewLinearSystemSolver)
    {
        // Validate and assign defaults
        ThisParameters = this->ValidateAndAssignParameters(ThisParameters, this->GetDefaultParameters());
        this->AssignSettings(ThisParameters);
    }

    /**
     * @brief Default constructor (explicit flags instead of a Parameters object)
     */
    explicit ResidualBasedEliminationBuilderAndSolverWithConstraints(
        typename TLinearSolver::Pointer pNewLinearSystemSolver,
        const bool CheckConstraintRelation = true,
        const bool ResetRelationMatrixEachIteration = false
        ) : BaseType(pNewLinearSystemSolver),
            mCheckConstraintRelation(CheckConstraintRelation),
            mResetRelationMatrixEachIteration(ResetRelationMatrixEachIteration)
    {
    }

    /** Destructor.
     */
    ~ResidualBasedEliminationBuilderAndSolverWithConstraints() override
    {
    }

    /**
     * @brief Create method
     * @param pNewLinearSystemSolver The linear solver for the system of equations
     * @param ThisParameters The configuration parameters
     */
    typename BuilderAndSolverBaseType::Pointer Create(
        typename TLinearSolver::Pointer pNewLinearSystemSolver,
        Parameters ThisParameters
        ) const override
    {
        return Kratos::make_shared<ClassType>(pNewLinearSystemSolver,ThisParameters);
    }

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    // Dispatches to the constrained implementation only when MPCs are present;
    // otherwise falls back to the plain elimination builder-and-solver.
    void SetUpSystem(ModelPart& rModelPart) override
    {
        if(rModelPart.MasterSlaveConstraints().size() > 0)
            SetUpSystemWithConstraints(rModelPart);
        else
            BaseType::SetUpSystem(rModelPart);
    }

    /**
     * @brief Function to perform the build of the RHS.
     * The vector could be sized as the total number of dofs or as the number of unrestrained ones
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     * @param rA The LHS matrix
     * @param rb The RHS vector
     */
    void Build(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemMatrixType& rA,
        TSystemVectorType& rb
        ) override
    {
        if(rModelPart.MasterSlaveConstraints().size() > 0)
            BuildWithConstraints(pScheme, rModelPart, rA, rb);
        else
            BaseType::Build(pScheme, rModelPart, rA, rb);
    }

    /**
     * @brief Function to perform the building and solving phase at the same time.
     * @details It is ideally the fastest and safer function to use when it is possible to solve
     * just after building
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     * @param A The LHS matrix
     * @param Dx The Unknowns vector
     * @param b The RHS vector
     */
    void BuildAndSolve(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemMatrixType& A,
        TSystemVectorType& Dx,
        TSystemVectorType& b) override
    {
        if(rModelPart.MasterSlaveConstraints().size() > 0)
            BuildAndSolveWithConstraints(pScheme, rModelPart, A, Dx, b);
        else
            BaseType::BuildAndSolve(pScheme, rModelPart, A, Dx, b);
    }

    /**
     * @brief Function to perform the build of the RHS.
     * @details The vector could be sized as the total number of dofs or as the number of unrestrained ones
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     */
    void BuildRHS(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemVectorType& b) override
    {
        KRATOS_TRY

        if(rModelPart.MasterSlaveConstraints().size() > 0)
            BuildRHSWithConstraints(pScheme, rModelPart, b);
        else
            BaseType::BuildRHS(pScheme, rModelPart, b);

        KRATOS_CATCH("")
    }

    /**
     * @brief Builds the list of the DofSets involved in the problem by "asking" to each element
     * and condition its Dofs.
* @details The list of dofs is stored inside the BuilderAndSolver as it is closely connected to the
     * way the matrix and RHS are built
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     */
    void SetUpDofSet(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart
        ) override
    {
        if(rModelPart.MasterSlaveConstraints().size() > 0)
            SetUpDofSetWithConstraints(pScheme, rModelPart);
        else
            BaseType::SetUpDofSet(pScheme, rModelPart);
    }

    /**
     * @brief It applies certain operations at the system of equations at the begining of the solution step
     * @param rModelPart The model part to compute
     * @param rA The LHS matrix of the system of equations
     * @param rDx The vector of unknowns
     * @param rb The RHS vector of the system of equations
     */
    void InitializeSolutionStep(
        ModelPart& rModelPart,
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb
        ) override
    {
        KRATOS_TRY

        BaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb);

        // Getting process info
        const ProcessInfo& r_process_info = rModelPart.GetProcessInfo();

        // Computing constraints
        const int n_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());
        auto constraints_begin = rModelPart.MasterSlaveConstraintsBegin();
        #pragma omp parallel for schedule(guided, 512) firstprivate(n_constraints, constraints_begin)
        for (int k = 0; k < n_constraints; ++k) {
            auto it = constraints_begin + k;
            it->InitializeSolutionStep(r_process_info); // Here each constraint constructs and stores its T and C matrices. Also its equation slave_ids.
        }

        KRATOS_CATCH("ResidualBasedEliminationBuilderAndSolverWithConstraints failed to initialize solution step.")
    }

    /**
     * @brief It applies certain operations at the system of equations at the end of the solution step
     * @param rModelPart The model part to compute
     * @param rA The LHS matrix of the system of equations
     * @param rDx The vector of unknowns
     * @param rb The RHS vector of the system of equations
     */
    void FinalizeSolutionStep(
        ModelPart& rModelPart,
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb
        ) override
    {
        KRATOS_TRY

        BaseType::FinalizeSolutionStep(rModelPart, rA, rDx, rb);

        // Getting process info
        const ProcessInfo& r_process_info = rModelPart.GetProcessInfo();

        // Computing constraints
        const int n_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());
        const auto constraints_begin = rModelPart.MasterSlaveConstraintsBegin();
        #pragma omp parallel for schedule(guided, 512) firstprivate(n_constraints, constraints_begin)
        for (int k = 0; k < n_constraints; ++k) {
            auto it = constraints_begin + k;
            it->FinalizeSolutionStep(r_process_info);
        }

        KRATOS_CATCH("ResidualBasedEliminationBuilderAndSolverWithConstraints failed to finalize solution step.")
    }

    /**
     * @brief This method provides the defaults parameters to avoid conflicts between the different constructors
     * @return The default parameters
     */
    Parameters GetDefaultParameters() const override
    {
        // NOTE(review): "reset_relation_matrix_each_iteration" defaults to true here
        // while the member/ctor default is false — confirm which is intended.
        Parameters default_parameters = Parameters(R"(
        {
            "name" : "elimination_builder_and_solver_with_constraints",
            "check_constraint_relation" : true,
            "reset_relation_matrix_each_iteration" : true
        })");

        // Getting base class default parameters
        const Parameters base_default_parameters = BaseType::GetDefaultParameters();
        default_parameters.RecursivelyAddMissingParameters(base_default_parameters);
        return default_parameters;
    }

    /**
     * @brief Returns the name of the class as used in the settings (snake_case format)
     * @return The name of the class
     */
    static std::string Name()
    {
        return "elimination_builder_and_solver_with_constraints";
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    std::string Info() const override
    {
        return "ResidualBasedEliminationBuilderAndSolverWithConstraints";
    }

    /// Print information about this object.
    void PrintInfo(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    /// Print object's data.
    void PrintData(std::ostream& rOStream) const override
    {
        rOStream << Info();
    }

    ///@}
    ///@name Friends
    ///@{

    ///@}

protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    TSystemMatrixPointerType mpTMatrix = NULL;             /// This is matrix containing the global relation for the constraints
    TSystemMatrixPointerType mpOldAMatrix = NULL;          /// This is matrix containing the old LHS structure
    TSystemVectorPointerType mpConstantVector = NULL;      /// This is vector containing the rigid movement of the constraint
    TSystemVectorPointerType mpDeltaConstantVector = NULL; /// This is vector contains the effective constant displacement
    DofsArrayType mDoFMasterFixedSet;                      /// The set containing the fixed master DoF of the system
    DofsArrayType mDoFSlaveSet;                            /// The set containing the slave DoF of the system
    SizeType mDoFToSolveSystemSize = 0;                    /// Number of degrees of freedom of the problem to actually be solved
    IndexMapType mReactionEquationIdMap;                   /// In order to know the corresponding EquationId for each component of the reaction vector

    bool mCheckConstraintRelation = false;                 /// If we do a constraint check relation
    bool mResetRelationMatrixEachIteration = false;        /// If we reset the relation matrix at each iteration
    bool mComputeConstantContribution = false;             /// If we compute the constant contribution of the MPC
    bool mCleared = true;                                  /// If the system has been reset

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    /**
     * @brief This method assembles the global relation matrix (T matrix
used to impose the MPC)
     * @param rT The global relation matrix
     * @param rTransformationMatrix The local transformation contribution
     * @param rSlaveEquationId The equation id of the slave dofs
     * @param rMasterEquationId The equation id of the master dofs
     */
    void AssembleRelationMatrix(
        TSystemMatrixType& rT,
        const LocalSystemMatrixType& rTransformationMatrix,
        const EquationIdVectorType& rSlaveEquationId,
        const EquationIdVectorType& rMasterEquationId
        )
    {
        const SizeType local_size_1 = rTransformationMatrix.size1();

        for (IndexType i_local = 0; i_local < local_size_1; ++i_local) {
            IndexType i_global = rSlaveEquationId[i_local];
            // Only rows belonging to free dofs are assembled into T
            if (i_global < BaseType::mEquationSystemSize) {
                BaseType::AssembleRowContributionFreeDofs(rT, rTransformationMatrix, i_global, i_local, rMasterEquationId);
            }
        }
    }

    /**
     * @brief This method constructs the relationship between the DoF
     * @param pScheme The integration scheme
     * @param rA The LHS of the system
     * @param rModelPart The model part which defines the problem
     */
    void ConstructMatrixStructure(
        typename TSchemeType::Pointer pScheme,
        TSystemMatrixType& rA,
        ModelPart& rModelPart
        ) override
    {
        if(rModelPart.MasterSlaveConstraints().size() > 0)
            ConstructMatrixStructureWithConstraints(pScheme, rA, rModelPart);
        else
            BaseType::ConstructMatrixStructure(pScheme, rA, rModelPart);
    }

    /**
     * @brief The same methods as the base class but with constraints
     * @param pScheme The pointer to the integration scheme
     * @param rModelPart The model part to compute
     * @param rA The LHS matrix of the system of equations
     * @param rDx The vector of unknowns
     * @param rb The RHS vector of the system of equations
     */
    void BuildAndSolveWithConstraints(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb
        )
    {
        KRATOS_TRY

        Timer::Start("Build");

        // We apply the master/slave relationship before build
        ApplyMasterSlaveRelation(pScheme, rModelPart, rA, rDx, rb);

        // We compute the effective constant vector
        TSystemVectorType dummy_Dx(mDoFToSolveSystemSize);
        TSparseSpace::SetToZero(dummy_Dx);
        ComputeEffectiveConstant(pScheme, rModelPart, dummy_Dx);

        // We do the build (after that we resize the solution vector to avoid problems)
        BuildWithConstraints(pScheme, rModelPart, rA, rb);

        Timer::Stop("Build");

        // Now we apply the BC (the solution vector is resized to the reduced size first)
        rDx.resize(mDoFToSolveSystemSize, false);
        ApplyDirichletConditions(pScheme, rModelPart, rA, rDx, rb);

        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", (this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << rA << "\nUnknowns vector = " << rDx << "\nRHS vector = " << rb << std::endl;

        // We solve the system of equations
        const auto timer = BuiltinTimer();
        const double start_solve = timer.ElapsedSeconds();
        Timer::Start("Solve");
        SystemSolveWithPhysics(rA, rDx, rb, rModelPart);
        Timer::Stop("Solve");
        const double stop_solve = timer.ElapsedSeconds();

        // We compute the effective constant vector
        ComputeEffectiveConstant(pScheme, rModelPart, rDx);

        // We reconstruct the Unknowns vector and the residual
        const double start_reconstruct_slaves = timer.ElapsedSeconds();
        ReconstructSlaveSolutionAfterSolve(pScheme, rModelPart, rA, rDx, rb);
        const double stop_reconstruct_slaves = timer.ElapsedSeconds();
        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Reconstruct slaves time: " << stop_reconstruct_slaves - start_reconstruct_slaves << std::endl;

        // Some verbosity
        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System solve time: " << stop_solve - start_solve << std::endl;

        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", (this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << rA << "\nUnknowns vector = " << rDx << "\nRHS vector = " << rb << std::endl;

        KRATOS_CATCH("")
    }

    /**
     * @brief The same methods as the base class but with constraints
     * @param pScheme The pointer to the integration scheme
     * @param rModelPart The model part to compute
     * @param rb The RHS vector of the system of equations
     */
    void BuildRHSWithConstraints(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart,
        TSystemVectorType& rb
        )
    {
        Timer::Start("Build RHS");

        // Resetting to zero the vector of reactions
        if(BaseType::mCalculateReactionsFlag) {
            TSparseSpace::SetToZero(*(BaseType::mpReactionsVector));
        }

        // Building without BC
        BuildRHSNoDirichlet(pScheme,rModelPart,rb);

        Timer::Stop("Build RHS");

        ApplyDirichletConditionsRHS(pScheme, rModelPart, rb);

        // We get the global T matrix
        const TSystemMatrixType& rTMatrix = *mpTMatrix;

        // Reconstruct the RHS: expand the reduced residual back to the full size (rb = T * rb_red)
        TSystemVectorType rb_copy = rb;
        rb.resize(BaseType::mEquationSystemSize, false);
        TSparseSpace::Mult(rTMatrix, rb_copy, rb);

        // Adding contribution to reactions
        TSystemVectorType& r_reactions_vector = *BaseType::mpReactionsVector;
        if (BaseType::mCalculateReactionsFlag) {
            for (auto& r_dof : BaseType::mDofSet) {
                const bool is_master_fixed = mDoFMasterFixedSet.find(r_dof) == mDoFMasterFixedSet.end() ? false : true;
                const bool is_slave = mDoFSlaveSet.find(r_dof) == mDoFSlaveSet.end() ? false : true;
                if (is_master_fixed || is_slave) { // Fixed or MPC dof
                    const IndexType equation_id = r_dof.EquationId();
                    r_reactions_vector[mReactionEquationIdMap[equation_id]] += rb[equation_id];
                }
            }
        }

        // Some verbosity
        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", (this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nRHS vector = " << rb << std::endl;
    }

    /**
     * @brief Builds the list of the DofSets involved in the problem by "asking" to each element and condition its Dofs.
     * @details Equivalent to the ResidualBasedEliminationBuilderAndSolver but with constraints.
The list of dofs is stored inside the BuilderAndSolver as it is closely connected to the way the matrix and RHS are built
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     */
    void SetUpDofSetWithConstraints(
        typename TSchemeType::Pointer pScheme,
        ModelPart& rModelPart
        )
    {
        KRATOS_TRY;

        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", ( this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Setting up the dofs" << std::endl;

        DofsVectorType dof_list, second_dof_list; // NOTE: The second dof list is only used on constraints to include master/slave relations

        typedef std::unordered_set < DofPointerType, DofPointerHasher> set_type;

        // Declaring temporal variables
        DofsArrayType dof_temp_all, dof_temp_solvable, dof_temp_slave;

        // We assign an empty dof array to our dof sets
        BaseType::mDofSet = DofsArrayType(); /// This corresponds with all the DoF of the system
        mDoFSlaveSet = DofsArrayType();      /// This corresponds with the slave (the ones not solved after compacting the system using MPC)

        /**
         * Here we declare two sets.
         * - The global set: Contains all the DoF of the system
         * - The slave set: The DoF that are not going to be solved, due to MPC formulation
         */
        set_type dof_global_set, dof_global_slave_set;

        #pragma omp parallel firstprivate(dof_list, second_dof_list)
        {
            const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

            // We create the temporal sets and we reserve some space on them
            set_type dofs_tmp_set, dof_temp_slave_set;
            dofs_tmp_set.reserve(20000);
            dof_temp_slave_set.reserve(200);

            // Gets the array of elements from the modeler
            ElementsArrayType& r_elements_array = rModelPart.Elements();
            const int number_of_elements = static_cast<int>(r_elements_array.size());
            #pragma omp for schedule(guided, 512) nowait
            for (int i = 0; i < number_of_elements; ++i) {
                auto it_elem = r_elements_array.begin() + i;

                // Gets list of Dof involved on every element
                pScheme->GetDofList(*it_elem, dof_list, r_current_process_info);
                dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
            }

            // Gets the array of conditions from the modeler
            ConditionsArrayType& r_conditions_array = rModelPart.Conditions();
            const int number_of_conditions = static_cast<int>(r_conditions_array.size());
            #pragma omp for schedule(guided, 512) nowait
            for (int i = 0; i < number_of_conditions; ++i) {
                auto it_cond = r_conditions_array.begin() + i;

                // Gets list of Dof involved on every condition
                pScheme->GetDofList(*it_cond, dof_list, r_current_process_info);
                dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
            }

            // Gets the array of constraints from the modeler
            auto& r_constraints_array = rModelPart.MasterSlaveConstraints();
            const int number_of_constraints = static_cast<int>(r_constraints_array.size());
            #pragma omp for schedule(guided, 512) nowait
            for (int i = 0; i < number_of_constraints; ++i) {
                auto it_const = r_constraints_array.begin() + i;

                // Gets list of Dof involved on every constraint (first list: slaves, second list: masters)
                it_const->GetDofList(dof_list, second_dof_list, r_current_process_info);
                dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
                dofs_tmp_set.insert(second_dof_list.begin(), second_dof_list.end());
                dof_temp_slave_set.insert(dof_list.begin(), dof_list.end());
            }

            // We merge all the sets in one thread
            #pragma omp critical
            {
                dof_global_set.insert(dofs_tmp_set.begin(), dofs_tmp_set.end());
                dof_global_slave_set.insert(dof_temp_slave_set.begin(), dof_temp_slave_set.end());
            }
        }

        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", ( this->GetEchoLevel() > 2)) << "Initializing ordered array filling\n" << std::endl;

        /// We transfer the temporal sets to our DoF set
        dof_temp_all.reserve(dof_global_set.size());
        for (auto p_dof : dof_global_set) {
            dof_temp_all.push_back( p_dof );
        }
        dof_temp_all.Sort();
        BaseType::mDofSet = dof_temp_all;

        dof_temp_slave.reserve(dof_global_slave_set.size());
        for (auto p_dof : dof_global_slave_set) {
            dof_temp_slave.push_back( p_dof );
        }
        dof_temp_slave.Sort();
        mDoFSlaveSet = dof_temp_slave;

        // Throws an exception if there are no Degrees Of Freedom involved in the analysis
        KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl;
        KRATOS_WARNING_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", mDoFSlaveSet.size() == 0) << "No slave degrees of freedom to solve!" << std::endl;

        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", ( this->GetEchoLevel() > 2)) << "Number of degrees of freedom:" << BaseType::mDofSet.size() << std::endl;

        BaseType::mDofSetIsInitialized = true;

        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", ( this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished setting up the dofs" << std::endl;

#ifdef USE_LOCKS_IN_ASSEMBLY
        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", ( this->GetEchoLevel() > 2)) << "Initializing lock array" << std::endl;

        // Destroy any previous locks before resizing, then create one lock per dof
        if (BaseType::mLockArray.size() != 0) {
            for (int i = 0; i < static_cast<int>(BaseType::mLockArray.size()); ++i) {
                omp_destroy_lock(&BaseType::mLockArray[i]);
            }
        }
        BaseType::mLockArray.resize(BaseType::mDofSet.size());
        for (int i = 0; i < static_cast<int>(BaseType::mLockArray.size()); ++i) {
            omp_init_lock(&BaseType::mLockArray[i]);
        }

        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", ( this->GetEchoLevel() > 2)) << "End of setup dof set\n" << std::endl;
#endif

        // If reactions are to be calculated, we check if all the dofs have reactions defined
        // This is to be done only in debug mode
#ifdef KRATOS_DEBUG
        if(BaseType::GetCalculateReactionsFlag()) {
            for(auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) {
                KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " << std::endl
                    << "Node : " << dof_iterator->Id()<< std::endl
                    << "Dof : " << (*dof_iterator) << std::endl
                    << "Not possible to calculate reactions." << std::endl;
            }
        }
#endif

        KRATOS_CATCH("");
    }

    /**
     * @brief This is a call to the linear system solver (taking into account some physical particularities of the problem)
     * @param rA The LHS matrix
     * @param rDx The Unknowns vector
     * @param rb The RHS vector
     * @param rModelPart The model part of the problem to solve
     */
    void SystemSolveWithPhysics(
        TSystemMatrixType& rA,
        TSystemVectorType& rDx,
        TSystemVectorType& rb,
        ModelPart& rModelPart
        )
    {
        KRATOS_TRY

        double norm_b = 0.0;
        if (TSparseSpace::Size(rb) > 0)
            norm_b = TSparseSpace::TwoNorm(rb);

        if (norm_b > 0.0) {
            // Create the auxiliar dof set: solvable dofs only (free and not slave)
            DofsArrayType aux_dof_set;
            aux_dof_set.reserve(mDoFToSolveSystemSize);
            for (auto& r_dof : BaseType::mDofSet) {
                if (r_dof.EquationId() < BaseType::mEquationSystemSize) {
                    auto it = mDoFSlaveSet.find(r_dof);
                    if (it == mDoFSlaveSet.end())
                        aux_dof_set.push_back( &r_dof );
                }
            }
            aux_dof_set.Sort();

            // Sanity checks: the solvable dof set must match both the stored reduced size and the matrix size
            KRATOS_ERROR_IF_NOT(aux_dof_set.size() == mDoFToSolveSystemSize) << "Inconsistency (I) in system size: " << mDoFToSolveSystemSize << " vs " << aux_dof_set.size() << "\n Size dof set " << BaseType::mDofSet.size() << " vs Size slave dof set " << mDoFSlaveSet.size() << std::endl;
            KRATOS_ERROR_IF_NOT(aux_dof_set.size() == rA.size1()) << "Inconsistency (II) in system size: " << rA.size1() << " vs " << aux_dof_set.size() << "\n Size dof set " << BaseType::mDofSet.size() << " vs Size slave dof set " << mDoFSlaveSet.size() << std::endl;

            // Provide physical data as needed
            if(BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded())
                BaseType::mpLinearSystemSolver->ProvideAdditionalData(rA, rDx, rb, aux_dof_set, rModelPart);

            // Do solve
            BaseType::mpLinearSystemSolver->Solve(rA, rDx, rb);
        } else {
            TSparseSpace::SetToZero(rDx);
            KRATOS_WARNING_IF("ResidualBasedEliminationBuilderAndSolver", rModelPart.GetCommunicator().MyPID() == 0) << "ATTENTION! setting the RHS to zero!"
<< std::endl;
    }

    // Prints information about the linear system solver employed
    KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << *(BaseType::mpLinearSystemSolver) << std::endl;

    KRATOS_CATCH("")
}

/**
 * @brief This function is exactly same as the ConstructMatrixStructure() function in base class except that the function
 * @details Has the call to ApplyConstraints function call once the element and conditions compute their equation ids
 * @param pScheme The pointer to the integration scheme
 * @param rA The LHS matrix whose sparse graph is allocated here
 * @param rModelPart The model part to compute
 * @todo Move this method to a common class with block builder and solver with constraints
 */
virtual void ConstructMatrixStructureWithConstraints(
    typename TSchemeType::Pointer pScheme,
    TSystemMatrixType& rA,
    ModelPart& rModelPart
    )
{
    // Filling with zero the matrix (creating the structure)
    Timer::Start("MatrixStructure");

    // The total number of dof of the system
    const SizeType equation_size = BaseType::mEquationSystemSize;

    // This vector contains the indexes sets for all rows
    std::vector<IndexSetType> indices(equation_size);

    // We reserve some indexes on each row
    #pragma omp parallel for firstprivate(equation_size)
    for (int index = 0; index < static_cast<int>(equation_size); ++index)
        indices[index].reserve(40);

    /// Definition of the equation id vector type
    EquationIdVectorType ids(3, 0);
    EquationIdVectorType second_ids(3, 0); // NOTE: Used only on the constraints to take into account the master dofs

    #pragma omp parallel firstprivate(ids, second_ids)
    {
        // The process info
        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

        // We repeat the same declaration for each thread (thread-local row sets, merged under the critical below)
        std::vector<IndexSetType> temp_indexes(equation_size);

        #pragma omp for
        for (int index = 0; index < static_cast<int>(equation_size); ++index)
            temp_indexes[index].reserve(30);

        // Getting the size of the array of elements from the model
        const int number_of_elements = static_cast<int>(rModelPart.Elements().size());

        // Element initial iterator
        const auto el_begin = rModelPart.ElementsBegin();

        // We iterate over the elements
        #pragma omp for schedule(guided, 512) nowait
        for (int i_elem = 0; i_elem<number_of_elements; ++i_elem) {
            auto it_elem = el_begin + i_elem;
            pScheme->EquationId(*it_elem, ids, r_current_process_info);
            // Couple every pair of in-system dofs of the element (ids >= mEquationSystemSize are restrained and skipped)
            for (auto& id_i : ids) {
                if (id_i < BaseType::mEquationSystemSize) {
                    auto& row_indices = temp_indexes[id_i];
                    for (auto& id_j : ids) {
                        if (id_j < BaseType::mEquationSystemSize) {
                            row_indices.insert(id_j);
                        }
                    }
                }
            }
        }

        // Getting the size of the array of the conditions
        const int number_of_conditions = static_cast<int>(rModelPart.Conditions().size());

        // Condition initial iterator
        const auto cond_begin = rModelPart.ConditionsBegin();

        // We iterate over the conditions
        #pragma omp for schedule(guided, 512) nowait
        for (int i_cond = 0; i_cond<number_of_conditions; ++i_cond) {
            auto it_cond = cond_begin + i_cond;
            pScheme->EquationId(*it_cond, ids, r_current_process_info);
            for (auto& id_i : ids) {
                if (id_i < BaseType::mEquationSystemSize) {
                    auto& row_indices = temp_indexes[id_i];
                    for (auto& id_j : ids) {
                        if (id_j < BaseType::mEquationSystemSize) {
                            row_indices.insert(id_j);
                        }
                    }
                }
            }
        }

        // Getting the size of the array of the constraints
        const int number_of_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());

        // Constraint initial iterator
        const auto const_begin = rModelPart.MasterSlaveConstraints().begin();

        // We iterate over the constraints
        #pragma omp for schedule(guided, 512) nowait
        for (int i_const = 0; i_const < number_of_constraints; ++i_const) {
            auto it_const = const_begin + i_const;

            // Detect if the constraint is active or not. If the user did not make any choice the constraint
            // It is active by default
            bool constraint_is_active = true;
            if( it_const->IsDefined(ACTIVE) ) {
                constraint_is_active = it_const->Is(ACTIVE);
            }

            if(constraint_is_active) {
                it_const->EquationIdVector(ids, second_ids, r_current_process_info);

                // Slave DoFs
                for (auto& id_i : ids) {
                    if (id_i < BaseType::mEquationSystemSize) {
                        auto& row_indices = temp_indexes[id_i];
                        for (auto& id_j : ids) {
                            if (id_j < BaseType::mEquationSystemSize) {
                                row_indices.insert(id_j);
                            }
                        }
                    }
                }

                // Master DoFs
                for (auto& id_i : second_ids) {
                    if (id_i < BaseType::mEquationSystemSize) {
                        auto& row_indices = temp_indexes[id_i];
                        for (auto& id_j : second_ids) {
                            if (id_j < BaseType::mEquationSystemSize) {
                                row_indices.insert(id_j);
                            }
                        }
                    }
                }
            }
        }

        // Merging all the temporal indexes (one thread at a time into the shared "indices")
        #pragma omp critical
        {
            for (int i = 0; i < static_cast<int>(temp_indexes.size()); ++i) {
                indices[i].insert(temp_indexes[i].begin(), temp_indexes[i].end());
            }
        }
    }

    // Count the row sizes
    SizeType nnz = 0;
    for (IndexType i = 0; i < indices.size(); ++i)
        nnz += indices[i].size();

    rA = CompressedMatrixType(indices.size(), indices.size(), nnz);

    double *Avalues = rA.value_data().begin();
    IndexType *Arow_indices = rA.index1_data().begin();
    IndexType *Acol_indices = rA.index2_data().begin();

    // Filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
Arow_indices[0] = 0;
    for (int i = 0; i < static_cast<int>(rA.size1()); i++)
        Arow_indices[i + 1] = Arow_indices[i] + indices[i].size(); // prefix sum: row i starts at Arow_indices[i]

    #pragma omp parallel for
    for (int i = 0; i < static_cast<int>(rA.size1()); ++i) {
        const IndexType row_begin = Arow_indices[i];
        const IndexType row_end = Arow_indices[i + 1];
        IndexType k = row_begin;
        for (auto it = indices[i].begin(); it != indices[i].end(); ++it) {
            Acol_indices[k] = *it;
            Avalues[k] = 0.0;
            k++;
        }
        indices[i].clear(); //deallocating the memory
        std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]); // CSR requires sorted column indices per row
    }

    rA.set_filled(indices.size() + 1, nnz);

    Timer::Stop("MatrixStructure");
}

/**
 * @brief This function is exactly same as the ConstructMatrixStructure() function in base class except that the function has the call to ApplyConstraints function call once the element and conditions compute their equation slave_ids
 * @param pScheme The pointer to the integration scheme
 * @param rT The global relation matrix
 * @param rModelPart The model part to compute
 */
virtual void ConstructRelationMatrixStructure(
    typename TSchemeType::Pointer pScheme,
    TSystemMatrixType& rT,
    ModelPart& rModelPart
    )
{
    // Filling with zero the matrix (creating the structure)
    Timer::Start("RelationMatrixStructure");

    // Map: equation id -> condensed (solvable) column index; only non-slave dofs get an entry
    IndexMapType solvable_dof_reorder;
    // Map: equation id (row of T) -> set of condensed column indices of that row
    std::unordered_map<IndexType, IndexSetType> master_indices;

    // Filling with "ones" (each solvable dof row of T carries its own identity column)
    typedef std::pair<IndexType, IndexType> IndexIndexPairType;
    typedef std::pair<IndexType, IndexSetType> IndexIndexSetPairType;
    IndexType counter = 0;
    for (auto& dof : BaseType::mDofSet) {
        if (dof.EquationId() < BaseType::mEquationSystemSize) {
            const IndexType equation_id = dof.EquationId();
            auto it = mDoFSlaveSet.find(dof);
            if (it == mDoFSlaveSet.end()) {
                solvable_dof_reorder.insert(IndexIndexPairType(equation_id, counter));
                master_indices.insert(IndexIndexSetPairType(equation_id, IndexSetType({counter})));
                ++counter;
            } else {
                // Slave dofs start with an empty row; filled below from their masters
                master_indices.insert(IndexIndexSetPairType(equation_id, IndexSetType({})));
            }
        }
    }

    // The process info
    const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

    /// Definition of the equation id vector type
    EquationIdVectorType ids(3, 0);
    EquationIdVectorType second_ids(3, 0); // NOTE: Used only on the constraints to take into account the master dofs

    const int number_of_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());
    const auto it_const_begin = rModelPart.MasterSlaveConstraints().begin();
    // TODO: OMP
    for (int i_const = 0; i_const < number_of_constraints; ++i_const) {
        auto it_const = it_const_begin + i_const;

        // Detect if the constraint is active or not. If the user did not make any choice the constraint
        // It is active by default
        bool constraint_is_active = true;
        if( it_const->IsDefined(ACTIVE) ) {
            constraint_is_active = it_const->Is(ACTIVE);
        }

        if(constraint_is_active) {
            it_const->EquationIdVector(ids, second_ids, r_current_process_info);
            for (auto& slave_id : ids) {
                if (slave_id < BaseType::mEquationSystemSize) {
                    auto it_slave = solvable_dof_reorder.find(slave_id);
                    // Only true slaves (not in the solvable reorder map) receive master columns
                    if (it_slave == solvable_dof_reorder.end()) {
                        for (auto& master_id : second_ids) {
                            if (master_id < BaseType::mEquationSystemSize) {
                                auto& master_row_indices = master_indices[slave_id];
                                master_row_indices.insert(solvable_dof_reorder[master_id]);
                            }
                        }
                    }
                }
            }
        }
    }

    KRATOS_DEBUG_ERROR_IF_NOT(BaseType::mEquationSystemSize == master_indices.size()) << "Inconsistency in the dofs size: " << BaseType::mEquationSystemSize << "\t vs \t" << master_indices.size() << std::endl;

    // Count the row sizes
    SizeType nnz = 0;
    for (IndexType i = 0; i < BaseType::mEquationSystemSize; ++i) {
        nnz += master_indices[i].size();
    }

    rT = CompressedMatrixType(BaseType::mEquationSystemSize, mDoFToSolveSystemSize, nnz);

    double *Tvalues = rT.value_data().begin();
    IndexType *Trow_indices = rT.index1_data().begin();
    IndexType *Tcol_indices = rT.index2_data().begin();

    // Filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
Trow_indices[0] = 0;
    for (IndexType i = 0; i < BaseType::mEquationSystemSize; ++i)
        Trow_indices[i + 1] = Trow_indices[i] + master_indices[i].size();

    KRATOS_DEBUG_ERROR_IF_NOT(Trow_indices[BaseType::mEquationSystemSize] == nnz) << "Nonzero values does not coincide with the row index definition: " << Trow_indices[BaseType::mEquationSystemSize] << " vs " << nnz << std::endl;

    #pragma omp parallel for
    for (int i = 0; i < static_cast<int>(rT.size1()); ++i) {
        const IndexType row_begin = Trow_indices[i];
        const IndexType row_end = Trow_indices[i + 1];
        IndexType k = row_begin;
        for (auto it = master_indices[i].begin(); it != master_indices[i].end(); ++it) {
            Tcol_indices[k] = *it;
            Tvalues[k] = 0.0;
            k++;
        }
        master_indices[i].clear(); //deallocating the memory
        std::sort(&Tcol_indices[row_begin], &Tcol_indices[row_end]); // CSR requires sorted columns per row
    }
    rT.set_filled(BaseType::mEquationSystemSize + 1, nnz);

    // Setting ones (identity entries for the solvable dofs)
    for (auto& solv_dof : solvable_dof_reorder) {
        rT(solv_dof.first, solv_dof.second) = 1.0;
    }

    Timer::Stop("RelationMatrixStructure");
}

/**
 * @brief This function is exactly same as the Build() function in base class except that the function
 * @details It has the call to ApplyConstraints function call once the LHS or RHS are computed by elements and conditions
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param rA The LHS matrix
 * @param rb The RHS vector
 * @param UseBaseBuild If the base Build function will be used
 */
void BuildWithConstraints(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& rA,
    TSystemVectorType& rb,
    const bool UseBaseBuild = true
    )
{
    KRATOS_TRY

    // We build the original system
    if (UseBaseBuild)
        BaseType::Build(pScheme, rModelPart, rA, rb);
    else
        BuildWithoutConstraints(pScheme, rModelPart, rA, rb);

    // Assemble the constraints
    const auto timer = BuiltinTimer();

    // We get the global T matrix
    const TSystemMatrixType& rTMatrix = *mpTMatrix;

    // We compute only once (or if cleared)
    if (mCleared) {
        mCleared = false;
        ComputeConstraintContribution(pScheme, rModelPart, true, mComputeConstantContribution);
    } else if (mResetRelationMatrixEachIteration) {
        ResetConstraintSystem();
        ComputeConstraintContribution(pScheme, rModelPart, mResetRelationMatrixEachIteration, mComputeConstantContribution);
    }

    // We compute the transposed matrix of the global relation matrix
    TSystemMatrixType T_transpose_matrix(mDoFToSolveSystemSize, BaseType::mEquationSystemSize);
    SparseMatrixMultiplicationUtility::TransposeMatrix<TSystemMatrixType, TSystemMatrixType>(T_transpose_matrix, rTMatrix, 1.0);

    // The proper way to include the constants is in the RHS as T^t(f - A * g)
    TSystemVectorType rb_copy = rb;
    if (mComputeConstantContribution) {
        // We get the g constant vector
        TSystemVectorType& rDeltaConstantVector = *mpDeltaConstantVector;
        TSystemVectorType aux_constant_vector(rDeltaConstantVector);
        TSparseSpace::Mult(rA, rDeltaConstantVector, aux_constant_vector);
        TSparseSpace::UnaliasedAdd(rb_copy, -1.0, aux_constant_vector);
    }

    // The auxiliar matrix to store the intermediate matrix multiplication (T^t * A)
    TSystemMatrixType auxiliar_A_matrix(mDoFToSolveSystemSize, BaseType::mEquationSystemSize);
    SparseMatrixMultiplicationUtility::MatrixMultiplication(T_transpose_matrix, rA, auxiliar_A_matrix);

    // We do a backup of the matrix before apply the constraints (restored in ReconstructSlaveSolutionAfterSolve)
    if (mpOldAMatrix == NULL) {
        // If the pointer is not initialized initialize it to an empty matrix
        TSystemMatrixPointerType pNewOldAMatrix = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
        mpOldAMatrix.swap(pNewOldAMatrix);
    }
    (*mpOldAMatrix).swap(rA);

    // We resize of system of equations to the condensed (solvable) size
    rA.resize(mDoFToSolveSystemSize, mDoFToSolveSystemSize, false);
    rb.resize(mDoFToSolveSystemSize, false);

    // Final multiplication: rA = (T^t * A) * T, rb = T^t * (f - A * g)
    SparseMatrixMultiplicationUtility::MatrixMultiplication(auxiliar_A_matrix, rTMatrix, rA);
    TSparseSpace::Mult(T_transpose_matrix, rb_copy, rb);

    // Cleaning up memory
    auxiliar_A_matrix.resize(0, 0, false);
    T_transpose_matrix.resize(0, 0, false);

    KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", this->GetEchoLevel() >= 1) << "Constraint relation build time and multiplication: " << timer.ElapsedSeconds() << std::endl;

    KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", this->GetEchoLevel() > 2) << "Finished parallel building with constraints" << std::endl;

    KRATOS_CATCH("")
}

/**
 * @brief Function to perform the build of the RHS.
 * @details The vector could be sized as the total number of dofs or as the number of unrestrained ones
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param rb The RHS of the system
 */
void BuildRHSNoDirichlet(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemVectorType& rb
    )
{
    KRATOS_TRY

    // Assemble the constraints
    const auto timer = BuiltinTimer();

    // We get the global T matrix
    const TSystemMatrixType& rTMatrix = *mpTMatrix;

    // We compute only once (or if cleared)
    if (mCleared) {
        mCleared = false;
        ComputeConstraintContribution(pScheme, rModelPart, true, mComputeConstantContribution);
    } else if (mResetRelationMatrixEachIteration) {
        ResetConstraintSystem();
        ComputeConstraintContribution(pScheme, rModelPart, mResetRelationMatrixEachIteration, mComputeConstantContribution);
    }

    // We compute the transposed matrix of the global relation matrix
    TSystemMatrixType T_transpose_matrix(mDoFToSolveSystemSize, BaseType::mEquationSystemSize);
    SparseMatrixMultiplicationUtility::TransposeMatrix<TSystemMatrixType, TSystemMatrixType>(T_transpose_matrix, rTMatrix, 1.0);

    // We build the original system
    TSystemMatrixType A; // Dummy auxiliar matrix we need to build anyway because are needed to impose the rigid displacements
    if (mComputeConstantContribution) {
        A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false);
        ConstructMatrixStructure(pScheme, A, rModelPart);
        BuildWithoutConstraints(pScheme, rModelPart, A, rb);
} else {
        BuildRHSNoDirichletWithoutConstraints(pScheme, rModelPart, rb);
    }

    // The proper way to include the constants is in the RHS as T^t(f - A * g)
    TSystemVectorType rb_copy = rb;
    if (mComputeConstantContribution) {
        // We get the g constant vector
        TSystemVectorType& rDeltaConstantVector = *mpDeltaConstantVector;
        TSystemVectorType aux_constant_vector(rDeltaConstantVector);
        TSparseSpace::Mult(A, rDeltaConstantVector, aux_constant_vector);
        TSparseSpace::UnaliasedAdd(rb_copy, -1.0, aux_constant_vector);
    }

    // Resize to the condensed (solvable) size before the final product
    rb.resize(mDoFToSolveSystemSize, false);

    // Final multiplication
    TSparseSpace::Mult(T_transpose_matrix, rb_copy, rb);

    KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", this->GetEchoLevel() >= 1) << "Constraint relation build time and multiplication: " << timer.ElapsedSeconds() << std::endl;

    KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", this->GetEchoLevel() > 2) << "Finished parallel building with constraints" << std::endl;

    KRATOS_CATCH("")
}

/**
 * @brief This method resize and initializes the system of equations
 * @details Additionally what is done in the base class the constraints are initialized
 * @param pA The pointer to the LHS matrix
 * @param pDx The pointer to the vector of Unknowns
 * @param pb The pointer to the RHS vector
 * @param rModelPart The model part to be computed
 */
void ResizeAndInitializeVectors(
    typename TSchemeType::Pointer pScheme,
    TSystemMatrixPointerType& pA,
    TSystemVectorPointerType& pDx,
    TSystemVectorPointerType& pb,
    ModelPart& rModelPart
    ) override
{
    // We resize the basic system
    BaseType::ResizeAndInitializeVectors(pScheme, pA, pDx, pb, rModelPart);

    // If needed resize the vector for the calculation of reactions
    if (BaseType::mCalculateReactionsFlag) {
        // Reactions are stored for fixed dofs and slave dofs (see mReactionEquationIdMap)
        const SizeType reactions_vector_size = BaseType::mDofSet.size() - mDoFToSolveSystemSize + mDoFMasterFixedSet.size();
        TSystemVectorType& rReactionsVector = *(BaseType::mpReactionsVector);
        if (rReactionsVector.size() != reactions_vector_size)
            rReactionsVector.resize(reactions_vector_size, false);
    }

    // Now we resize the relation matrix used on the MPC solution
    if(rModelPart.MasterSlaveConstraints().size() > 0) {
        if (mpTMatrix == NULL) {
            // If the pointer is not initialized initialize it to an empty matrix
            TSystemMatrixPointerType pNewT = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
            mpTMatrix.swap(pNewT);
        }

        // The rigid movement
        if (mpConstantVector == NULL) {
            // If the pointer is not initialized initialize it to an empty vector
            TSystemVectorPointerType pNewConstantVector = TSystemVectorPointerType(new TSystemVectorType(0));
            mpConstantVector.swap(pNewConstantVector);
        }

        // The effective rigid movement
        if (mpDeltaConstantVector == NULL) {
            // If the pointer is not initialized initialize it to an empty vector
            TSystemVectorPointerType pNewConstantVector = TSystemVectorPointerType(new TSystemVectorType(0));
            mpDeltaConstantVector.swap(pNewConstantVector);
        }

        // System matrices/vectors
        TSystemMatrixType& rTMatrix = *mpTMatrix;
        TSystemVectorType& rConstantVector = *mpConstantVector;
        TSystemVectorType& rDeltaConstantVector = *mpDeltaConstantVector;

        // Resizing the system matrix
        if (rTMatrix.size1() == 0 || BaseType::GetReshapeMatrixFlag() || mCleared) { // If the matrix is not initialized
            rTMatrix.resize(BaseType::mEquationSystemSize, mDoFToSolveSystemSize, false);
            ConstructRelationMatrixStructure(pScheme, rTMatrix, rModelPart);
        } else {
            if (rTMatrix.size1() != BaseType::mEquationSystemSize || rTMatrix.size2() != mDoFToSolveSystemSize) {
                KRATOS_ERROR <<"The equation system size has changed during the simulation. This is not permited."<<std::endl;
                rTMatrix.resize(BaseType::mEquationSystemSize, mDoFToSolveSystemSize, false);
                ConstructRelationMatrixStructure(pScheme, rTMatrix, rModelPart);
            }
        }

        // Resizing the system vector
        // The rigid movement
        if (rConstantVector.size() != BaseType::mEquationSystemSize || BaseType::GetReshapeMatrixFlag() || mCleared) {
            rConstantVector.resize(BaseType::mEquationSystemSize, false);
            mComputeConstantContribution = ComputeConstraintContribution(pScheme, rModelPart);
        } else {
            if (rConstantVector.size() != BaseType::mEquationSystemSize) {
                KRATOS_ERROR <<"The equation system size has changed during the simulation. This is not permited."<<std::endl;
                rConstantVector.resize(BaseType::mEquationSystemSize, false);
                mComputeConstantContribution = ComputeConstraintContribution(pScheme, rModelPart);
            }
        }

        // The effective rigid movement
        if (mComputeConstantContribution) {
            if (rDeltaConstantVector.size() != BaseType::mEquationSystemSize || BaseType::GetReshapeMatrixFlag() || mCleared) {
                rDeltaConstantVector.resize(BaseType::mEquationSystemSize, false);
            } else {
                if (rDeltaConstantVector.size() != BaseType::mEquationSystemSize) {
                    KRATOS_ERROR <<"The equation system size has changed during the simulation. This is not permited."<<std::endl;
                    rDeltaConstantVector.resize(BaseType::mEquationSystemSize, false);
                }
            }
        }
    }
}

/**
 * @brief It computes the reactions of the system
 * @param pScheme The pointer to the integration scheme
 * @param rModelPart The model part to compute
 * @param rA The LHS matrix of the system of equations
 * @param rDx The vector of unknowns
 * @param rb The RHS vector of the system of equations
 */
void CalculateReactions(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb
    ) override
{
    KRATOS_TRY

    // Refresh RHS to have the correct reactions
    BuildRHS(pScheme, rModelPart, rb);

    // Adding contribution to reactions
    TSystemVectorType& r_reactions_vector = *BaseType::mpReactionsVector;

    // Updating variables: reactions are stored for fixed dofs and for MPC slave dofs,
    // indexed through mReactionEquationIdMap (built in SetUpSystemWithConstraints)
    for (auto& r_dof : BaseType::mDofSet) {
        if ((r_dof.IsFixed()) || mDoFSlaveSet.find(r_dof) != mDoFSlaveSet.end()) {
            r_dof.GetSolutionStepReactionValue() = -r_reactions_vector[mReactionEquationIdMap[r_dof.EquationId()]];
        }
    }

    KRATOS_CATCH("ResidualBasedEliminationBuilderAndSolverWithConstraints::CalculateReactions failed ..");
}

/**
 * @brief Applies the dirichlet conditions. This operation may be very heavy or completely inexpensive depending on the implementation chosen and on how the System Matrix is built.
 * @details In the base ResidualBasedEliminationBuilderAndSolver does nothing, due to the fact that the BC are automatically managed with the elimination.
But in the constrints approach the slave DoF depending on fixed DoFs must be reconstructed * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param rA The LHS matrix * @param rDx The Unknowns vector * @param rb The RHS vector */ void ApplyDirichletConditions( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& rA, TSystemVectorType& rDx, TSystemVectorType& rb ) override { KRATOS_TRY; if (mDoFMasterFixedSet.size() > 0) { // We apply the same method as in the block builder and solver but instead of fixing the fixed Dofs, we just fix the master fixed Dofs std::vector<double> scaling_factors (mDoFToSolveSystemSize, 0.0); // NOTE: Dofs are assumed to be numbered consecutively const auto it_dof_begin = BaseType::mDofSet.begin(); IndexType counter = 0; for (IndexType i = 0; i < BaseType::mDofSet.size(); ++i) { auto it_dof = it_dof_begin + i; const IndexType equation_id = it_dof->EquationId(); if (equation_id < BaseType::mEquationSystemSize ) { auto it_first_check = mDoFSlaveSet.find(*it_dof); if (it_first_check == mDoFSlaveSet.end()) { auto it_second_check = mDoFSlaveSet.find(*it_dof); if (it_second_check == mDoFSlaveSet.end()) { if(mDoFMasterFixedSet.find(*it_dof) == mDoFMasterFixedSet.end()) { scaling_factors[counter] = 1.0; } } counter += 1; } } } double* Avalues = rA.value_data().begin(); IndexType* Arow_indices = rA.index1_data().begin(); IndexType* Acol_indices = rA.index2_data().begin(); // Detect if there is a line of all zeros and set the diagonal to a 1 if this happens #pragma omp parallel for for(int k = 0; k < static_cast<int>(mDoFToSolveSystemSize); ++k) { const IndexType col_begin = Arow_indices[k]; const IndexType col_end = Arow_indices[k+1]; bool empty = true; for (IndexType j = col_begin; j < col_end; ++j) { if(Avalues[j] != 0.0) { empty = false; break; } } if(empty) { rA(k,k) = 1.0; rb[k] = 0.0; } } #pragma omp parallel for for (int k = 0; k < 
static_cast<int>(mDoFToSolveSystemSize); ++k) { const IndexType col_begin = Arow_indices[k]; const IndexType col_end = Arow_indices[k+1]; const double k_factor = scaling_factors[k]; if (k_factor == 0) { // Zero out the whole row, except the diagonal for (IndexType j = col_begin; j < col_end; ++j) if (static_cast<int>(Acol_indices[j]) != k ) Avalues[j] = 0.0; // Zero out the RHS rb[k] = 0.0; } else { // Zero out the column which is associated with the zero'ed row for (IndexType j = col_begin; j < col_end; ++j) { if(scaling_factors[ Acol_indices[j] ] == 0 ) { Avalues[j] = 0.0; } } } } } KRATOS_CATCH(""); } /** * @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed */ void Clear() override { BaseType::Clear(); // Reseting auxiliar set of dofs mDoFMasterFixedSet = DofsArrayType(); mDoFSlaveSet = DofsArrayType(); // Clearing the relation map mReactionEquationIdMap.clear(); // Clear constraint system if (mpTMatrix != nullptr) TSparseSpace::Clear(mpTMatrix); if (mpConstantVector != nullptr) TSparseSpace::Clear(mpConstantVector); if (mpDeltaConstantVector != nullptr) TSparseSpace::Clear(mpDeltaConstantVector); // Set the flag mCleared = true; KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolverWithConstraints", this->GetEchoLevel() > 1) << "Clear Function called" << std::endl; } /** * @brief This method assigns settings to member variables * @param ThisParameters Parameters that are assigned to the member variables */ void AssignSettings(const Parameters ThisParameters) override { BaseType::AssignSettings(ThisParameters); mCheckConstraintRelation = ThisParameters["check_constraint_relation"].GetBool(); mResetRelationMatrixEachIteration = ThisParameters["reset_relation_matrix_each_iteration"].GetBool(); } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables 
///@{

///@}
///@name Private Operators
///@{

///@}
///@name Private Operations
///@{

/**
 * @brief This method computes the equivalent counter part of the SetUpSystem when using constraints
 * @param rModelPart The model part of the problem to solve
 */
void SetUpSystemWithConstraints(ModelPart& rModelPart)
{
    KRATOS_TRY

    // First we set up the system of equations without constraints
    // Set equation id for degrees of freedom the free degrees of freedom are positioned at the beginning of the system, while the fixed one are at the end (in opposite order).
    //
    // That means that if the EquationId is greater than "mEquationSystemSize" the pointed degree of freedom is restrained
    // This is almost the same SetUpSystem from ResidualBasedEliminationBuilderAndSolver, but we don't discard from the system the fixed dofs that are part of a constraint at the same time

    /// First we detect the master fixed DoFs ///

    // The current process info
    const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

    // Vector containing the localization in the system of the different terms
    DofsVectorType slave_dof_list, master_dof_list;

    // Declaring temporal variables
    DofsArrayType dof_temp_fixed_master;

    typedef std::unordered_set < DofPointerType, DofPointerHasher> set_type;
    set_type dof_global_fixed_master_set;

    // Iterate over constraints
    const int number_of_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size());
    const auto it_const_begin = rModelPart.MasterSlaveConstraints().begin();
    #pragma omp parallel firstprivate(slave_dof_list, master_dof_list)
    {
        // We create the temporal set and we reserve some space on them
        set_type dof_temp_fixed_master_set;
        dof_temp_fixed_master_set.reserve(2000);

        #pragma omp for schedule(guided, 512) nowait
        for (int i_const = 0; i_const < number_of_constraints; ++i_const) {
            auto it_const = it_const_begin + i_const;

            // Detect if the constraint is active or not. If the user did not make any choice the constraint
            // It is active by default
            bool constraint_is_active = true;
            if (it_const->IsDefined(ACTIVE))
                constraint_is_active = it_const->Is(ACTIVE);

            if (constraint_is_active) {
                it_const->GetDofList(slave_dof_list, master_dof_list, r_current_process_info);

                // Filling the set of dofs master and fixed at the same time
                for (auto& master_dof : master_dof_list) {
                    if (master_dof->IsFixed()) {
                        dof_temp_fixed_master_set.insert(master_dof);
                    }
                }
            }
        }

        // We merge all the sets in one thread
        #pragma omp critical
        {
            dof_global_fixed_master_set.insert(dof_temp_fixed_master_set.begin(), dof_temp_fixed_master_set.end());
        }
    }

    dof_temp_fixed_master.reserve(dof_global_fixed_master_set.size());
    for (auto p_dof : dof_global_fixed_master_set) {
        dof_temp_fixed_master.push_back( p_dof );
    }
    dof_temp_fixed_master.Sort();
    mDoFMasterFixedSet = dof_temp_fixed_master;

    /// Now we compute as expected ///
    int free_id = 0;
    int fix_id = BaseType::mDofSet.size();

    for (auto& dof : BaseType::mDofSet) {
        if (dof.IsFixed()) {
            // Fixed master dofs are kept inside the system (they are needed by the constraints);
            // all other fixed dofs are pushed to the tail, outside mEquationSystemSize
            auto it = mDoFMasterFixedSet.find(dof);
            if (it == mDoFMasterFixedSet.end()) {
                dof.SetEquationId(--fix_id);
            } else {
                dof.SetEquationId(free_id++);
            }
        } else {
            dof.SetEquationId(free_id++);
        }
    }

    BaseType::mEquationSystemSize = fix_id;

    // Add the computation of the global ids of the solvable dofs (in-system dofs that are not slaves)
    IndexType counter = 0;
    for (auto& dof : BaseType::mDofSet) {
        if (dof.EquationId() < BaseType::mEquationSystemSize) {
            auto it = mDoFSlaveSet.find(dof);
            if (it == mDoFSlaveSet.end()) {
                ++counter;
            }
        }
    }

    // The total system of equations to be solved
    mDoFToSolveSystemSize = counter;

    // Finally we build the relation between the EquationID and the component of the reaction
    counter = 0;
    for (auto& r_dof : BaseType::mDofSet) {
        const bool is_master_fixed = mDoFMasterFixedSet.find(r_dof) == mDoFMasterFixedSet.end() ? false : true;
        const bool is_slave = mDoFSlaveSet.find(r_dof) == mDoFSlaveSet.end() ? false : true;
        if (is_master_fixed || is_slave) { // Fixed or MPC dof
            mReactionEquationIdMap.insert({r_dof.EquationId(), counter});
            ++counter;
        }
    }

    KRATOS_CATCH("ResidualBasedEliminationBuilderAndSolverWithConstraints::SetUpSystemWithConstraints failed ..");
}

/**
 * @brief This method initializes the DoF using the master/slave relationship
 * @param pScheme The pointer to the integration scheme
 * @param rModelPart The model part to compute
 * @param rA The LHS matrix of the system of equations
 * @param rDx The vector of unknowns
 * @param rb The RHS vector of the system of equations
 */
void ApplyMasterSlaveRelation(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb
    )
{
    KRATOS_TRY

    // First we reset the slave dofs
    ConstraintUtilities::ResetSlaveDofs(rModelPart);

    // Now we apply the constraints
    ConstraintUtilities::ApplyConstraints(rModelPart);

    KRATOS_CATCH("");
}

/**
 * @brief This method checks that the master/slave relation is properly set
 * @param pScheme The pointer to the integration scheme
 * @param rModelPart The model part to compute
 * @param rDx The vector of unknowns
 * @param rDxSolved The vector of unknowns actually solved
 */
bool CheckMasterSlaveRelation(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemVectorType& rDx,
    TSystemVectorType& rDxSolved
    )
{
    KRATOS_TRY

    // Auxiliar values
    const auto it_dof_begin = BaseType::mDofSet.begin();
    TSystemVectorType current_solution(mDoFToSolveSystemSize);
    TSystemVectorType updated_solution(BaseType::mEquationSystemSize);
    TSystemVectorType residual_solution(BaseType::mEquationSystemSize);

    // Get current values (solvable dofs only, in condensed numbering)
    IndexType counter = 0;
    for (IndexType i = 0; i < BaseType::mDofSet.size(); ++i) {
        auto it_dof = it_dof_begin + i;
        const IndexType equation_id = it_dof->EquationId();
        if (equation_id < BaseType::mEquationSystemSize ) {
            auto it = mDoFSlaveSet.find(*it_dof);
            if (it == mDoFSlaveSet.end()) {
                current_solution[counter] =
it_dof->GetSolutionStepValue() + rDxSolved[counter];
                counter += 1;
            }
        }
    }

    #pragma omp parallel for
    for (int i = 0; i < static_cast<int>(BaseType::mDofSet.size()); ++i) {
        auto it_dof = it_dof_begin + i;
        const IndexType equation_id = it_dof->EquationId();
        if (equation_id < BaseType::mEquationSystemSize ) {
            residual_solution[equation_id] = it_dof->GetSolutionStepValue() + rDx[equation_id];
        }
    }

    // Apply master slave constraints: updated_solution = T * current_solution (+ constant vector)
    const TSystemMatrixType& rTMatrix = *mpTMatrix;
    TSparseSpace::Mult(rTMatrix, current_solution, updated_solution);
    if (mComputeConstantContribution) {
        ComputeConstraintContribution(pScheme, rModelPart, false, true);
        const TSystemVectorType& rConstantVector = *mpConstantVector;
        TSparseSpace::UnaliasedAdd(updated_solution, 1.0, rConstantVector);
    }
    TSparseSpace::UnaliasedAdd(residual_solution, -1.0, updated_solution);

    // Check database: any residual above machine epsilon means the relation is violated
    for(int k = 0; k < static_cast<int>(BaseType::mEquationSystemSize); ++k) {
        if (std::abs(residual_solution[k]) > std::numeric_limits<double>::epsilon()) return false;
    }

    return true;

    KRATOS_CATCH("");
}

/**
 * @brief This method reconstructs the slave solution after Solving.
 * @param pScheme The pointer to the integration scheme
 * @param rModelPart Reference to the ModelPart containing the problem.
 * @param rA System matrix
 * @param rDx Vector of results (variations on nodal variables)
 * @param rb RHS vector (residual)
 */
void ReconstructSlaveSolutionAfterSolve(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& rA,
    TSystemVectorType& rDx,
    TSystemVectorType& rb
    )
{
    KRATOS_TRY

    // We get the global T matrix and the constant vector
    const TSystemMatrixType& rTMatrix = *mpTMatrix;

    // We reconstruct the complete vector of Unknowns: rDx = T * Dx_solved
    TSystemVectorType Dx_copy = rDx;
    rDx.resize(BaseType::mEquationSystemSize);
    TSparseSpace::Mult(rTMatrix, Dx_copy, rDx);

    // Add the constant vector
    if (mComputeConstantContribution) {
        const TSystemVectorType& rDeltaConstantVector = *mpDeltaConstantVector;
        TSparseSpace::UnaliasedAdd(rDx, 1.0, rDeltaConstantVector);
    }

    // We check the solution
    if (mCheckConstraintRelation) {
        KRATOS_ERROR_IF_NOT(CheckMasterSlaveRelation(pScheme, rModelPart, rDx, Dx_copy)) << "The relation between master/slave dofs is not respected" << std::endl;
    }

    // Simply restore old LHS (backed up in BuildWithConstraints)
    (rA).swap(*mpOldAMatrix);
    mpOldAMatrix = NULL;

    // Reconstruct the RHS
    TSystemVectorType rb_copy = rb;
    rb.resize(BaseType::mEquationSystemSize, false);
    TSparseSpace::Mult(rTMatrix, rb_copy, rb);

    KRATOS_CATCH("ResidualBasedEliminationBuilderAndSolverWithConstraints::ReconstructSlaveSolutionAfterSolve failed ..");
}

/**
 * @brief Function to perform the build the system without constraints
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param rA The LHS matrix
 * @param rb The RHS vector
 */
void BuildWithoutConstraints(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemMatrixType& rA,
    TSystemVectorType& rb
    )
{
    // The current process info
    const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

    // Getting the array of elements
    ElementsArrayType& r_elements_array = rModelPart.Elements();

    // Getting the array of the conditions
    ConditionsArrayType& r_conditons_array =
rModelPart.Conditions();

    // Contributions to the system
    LocalSystemMatrixType lhs_contribution = LocalSystemMatrixType(0, 0);
    LocalSystemVectorType rhs_contribution = LocalSystemVectorType(0);

    // Vector containing the localization in the system of the different terms
    Element::EquationIdVectorType equation_id;

    // Assemble all elements and conditions
    #pragma omp parallel firstprivate( lhs_contribution, rhs_contribution, equation_id)
    {
        // Elements
        const auto it_elem_begin = r_elements_array.begin();
        const int nelements = static_cast<int>(r_elements_array.size());
        #pragma omp for schedule(guided, 512) nowait
        for (int i = 0; i<nelements; ++i) {
            auto it_elem = it_elem_begin + i;

            // Detect if the element is active or not. If the user did not make any choice the element is active by default
            bool element_is_active = true;
            if (it_elem->IsDefined(ACTIVE))
                element_is_active = it_elem->Is(ACTIVE);

            if (element_is_active) {
                // Calculate elemental contribution
                pScheme->CalculateSystemContributions(*it_elem, lhs_contribution, rhs_contribution, equation_id, r_current_process_info);

                // Assemble the elemental contribution
                AssembleWithoutConstraints(rA, rb, lhs_contribution, rhs_contribution, equation_id);
            }
        }

        // Conditions
        const auto it_cond_begin = r_conditons_array.begin();
        const int nconditions = static_cast<int>(r_conditons_array.size());
        #pragma omp for schedule(guided, 512)
        for (int i = 0; i<nconditions; ++i) {
            auto it_cond = it_cond_begin + i;

            // Detect if the element is active or not. If the user did not make any choice the element is active by default
            bool condition_is_active = true;
            if (it_cond->IsDefined(ACTIVE))
                condition_is_active = it_cond->Is(ACTIVE);

            if (condition_is_active) {
                // Calculate elemental contribution
                pScheme->CalculateSystemContributions(*it_cond, lhs_contribution, rhs_contribution, equation_id, r_current_process_info);

                // Assemble the elemental contribution
                AssembleWithoutConstraints(rA, rb, lhs_contribution, rhs_contribution, equation_id);
            }
        }
    }
}

/**
 * @brief Function to perform the build of the RHS without constraints
 * @details The vector could be sized as the total number of dofs or as the number of unrestrained ones
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param rb The RHS of the system
 */
void BuildRHSNoDirichletWithoutConstraints(
    typename TSchemeType::Pointer pScheme,
    ModelPart& rModelPart,
    TSystemVectorType& rb
    )
{
    // The current process info
    const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

    // Getting the array of elements
    ElementsArrayType& r_elements_array = rModelPart.Elements();

    // Getting the array of the conditions
    ConditionsArrayType& r_conditons_array = rModelPart.Conditions();

    // Contributions to the system
    LocalSystemVectorType rhs_contribution = LocalSystemVectorType(0);

    // Vector containing the localization in the system of the different terms
    Element::EquationIdVectorType equation_id;

    // Assemble all elements and conditions
    #pragma omp parallel firstprivate( rhs_contribution, equation_id)
    {
        // Elements
        const auto it_elem_begin = r_elements_array.begin();
        const int nelements = static_cast<int>(r_elements_array.size());
        #pragma omp for schedule(guided, 512) nowait
        for (int i = 0; i<nelements; ++i) {
            auto it_elem = it_elem_begin + i;

            // Detect if the element is active or not. If the user did not make any choice the element is active by default
            bool element_is_active = true;
            if (it_elem->IsDefined(ACTIVE))
                element_is_active = it_elem->Is(ACTIVE);

            if (element_is_active) {
                // Calculate elemental Right Hand Side Contribution
                pScheme->CalculateRHSContribution(*it_elem, rhs_contribution, equation_id, r_current_process_info);

                // Assemble the elemental contribution
                AssembleRHSWithoutConstraints(rb, rhs_contribution, equation_id);
            }
        }

        // Conditions
        const auto it_cond_begin = r_conditons_array.begin();
        const int nconditions = static_cast<int>(r_conditons_array.size());
        #pragma omp for schedule(guided, 512)
        for (int i = 0; i<nconditions; ++i) {
            auto it_cond = it_cond_begin + i;

            // Detect if the element is active or not. If the user did not make any choice the element is active by default
            bool condition_is_active = true;
            if (it_cond->IsDefined(ACTIVE))
                condition_is_active = it_cond->Is(ACTIVE);

            if (condition_is_active) {
                // Calculate elemental contribution
                pScheme->CalculateRHSContribution(*it_cond, rhs_contribution, equation_id, r_current_process_info);

                // Assemble the elemental contribution
                AssembleRHSWithoutConstraints(rb, rhs_contribution, equation_id);
            }
        }
    }
}

/**
 * @brief This function does the assembling of the LHS and RHS
 * @note The main difference respect the block builder and solver is the fact that the fixed DoFs are not considered on the assembling
 */
void AssembleWithoutConstraints(
    TSystemMatrixType& rA,
    TSystemVectorType& rb,
    const LocalSystemMatrixType& rLHSContribution,
    const LocalSystemVectorType& rRHSContribution,
    const Element::EquationIdVectorType& rEquationId
    )
{
    const SizeType local_size = rLHSContribution.size1();

    // Assemble RHS
    AssembleRHSWithoutConstraints(rb, rRHSContribution, rEquationId);

    // Assemble LHS
    for (IndexType i_local = 0; i_local < local_size; ++i_local) {
        const IndexType i_global = rEquationId[i_local];

        if (i_global < BaseType::mEquationSystemSize) {
            BaseType::AssembleRowContributionFreeDofs(rA,
rLHSContribution, i_global, i_local, rEquationId); } } } /** * @brief Assembling local contribution of nodes and elements in the RHS * @param rb The RHS vector */ void AssembleRHSWithoutConstraints( TSystemVectorType& rb, const LocalSystemVectorType& rRHSContribution, const Element::EquationIdVectorType& rEquationId ) { const SizeType local_size = rRHSContribution.size(); if (!BaseType::mCalculateReactionsFlag) { for (IndexType i_local = 0; i_local < local_size; ++i_local) { const IndexType i_global = rEquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { // free dof // ASSEMBLING THE SYSTEM VECTOR double& r_b_value = rb[i_global]; const double rhs_value = rRHSContribution[i_local]; AtomicAdd(r_b_value, rhs_value); } } } else { TSystemVectorType& r_reactions_vector = *BaseType::mpReactionsVector; for (IndexType i_local = 0; i_local < local_size; ++i_local) { const IndexType i_global = rEquationId[i_local]; auto it_dof = BaseType::mDofSet.begin() + i_global; const bool is_master_fixed = mDoFMasterFixedSet.find(*it_dof) == mDoFMasterFixedSet.end() ? false : true; const bool is_slave = mDoFSlaveSet.find(*it_dof) == mDoFSlaveSet.end() ? 
false : true; if (is_master_fixed || is_slave) { // Fixed or MPC dof double& r_b_value = r_reactions_vector[mReactionEquationIdMap[i_global]]; const double rhs_value = rRHSContribution[i_local]; AtomicAdd(r_b_value, rhs_value); } else if (it_dof->IsFree()) { // Free dof not in the MPC // ASSEMBLING THE SYSTEM VECTOR double& r_b_value = rb[i_global]; const double& rhs_value = rRHSContribution[i_local]; AtomicAdd(r_b_value, rhs_value); } } } } /** * @brief This method set to zero the relation matrix */ void ResetConstraintSystem() { TSystemMatrixType& rTMatrix = *mpTMatrix; double *Tvalues = rTMatrix.value_data().begin(); #pragma omp parallel for for (int i = 0; i < static_cast<int>(rTMatrix.nnz()); ++i) { Tvalues[i] = 0.0; } IndexMapType solvable_dof_reorder; // Filling with "ones" typedef std::pair<IndexType, IndexType> IndexIndexPairType; IndexType counter = 0; for (auto& dof : BaseType::mDofSet) { if (dof.EquationId() < BaseType::mEquationSystemSize) { const IndexType equation_id = dof.EquationId(); auto it = mDoFSlaveSet.find(dof); if (it == mDoFSlaveSet.end()) { solvable_dof_reorder.insert(IndexIndexPairType(equation_id, counter)); ++counter; } } } // Setting ones for (auto& solv_dof : solvable_dof_reorder) { rTMatrix(solv_dof.first, solv_dof.second) = 1.0; } if (mComputeConstantContribution) { TSystemVectorType& rConstantVector = *mpConstantVector; TSparseSpace::SetToZero(rConstantVector); } } /** * @brief This method applies the BC, only in the RHS * @param pScheme The pointer to the integration scheme * @param rModelPart The model part to compute * @param rb The RHS vector of the system of equations */ void ApplyDirichletConditionsRHS( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemVectorType& rb ) { KRATOS_TRY; if (mDoFMasterFixedSet.size() > 0) { // NOTE: dofs are assumed to be numbered consecutively const auto it_dof_begin = BaseType::mDofSet.begin(); #pragma omp parallel for for(int k = 0; k < 
static_cast<int>(mDoFToSolveSystemSize); ++k) { auto it_dof = it_dof_begin + k; if (k < static_cast<int>(BaseType::mEquationSystemSize)) { auto it = mDoFSlaveSet.find(*it_dof); if (it == mDoFSlaveSet.end()) { if(mDoFMasterFixedSet.find(*it_dof) != mDoFMasterFixedSet.end()) { rb[k] = 0.0; } } } } } KRATOS_CATCH(""); } /** * @brief This method computes the absolute constant contribution of the MPC * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param ComputeTranslationMatrix If the translation matrix will be assembled * @param ComputeConstantVector If the constant vector will be assembled * @return If there are constant constraints */ bool ComputeConstraintContribution( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, const bool ComputeTranslationMatrix = false, const bool ComputeConstantVector = false ) { KRATOS_TRY; // We build the global T matrix and the g constant vector TSystemMatrixType& rTMatrix = *mpTMatrix; TSystemVectorType& rConstantVector = *mpConstantVector; // Filling constant vector if (ComputeConstantVector) { #pragma omp parallel for for (int i = 0; i < static_cast<int>(BaseType::mEquationSystemSize); ++i) { rConstantVector[i] = 0.0; } } // Auxiliar set to reorder master DoFs IndexMapType solvable_dof_reorder; // Filling with "ones" typedef std::pair<IndexType, IndexType> IndexIndexPairType; IndexType counter = 0; for (auto& dof : BaseType::mDofSet) { if (dof.EquationId() < BaseType::mEquationSystemSize) { const IndexType equation_id = dof.EquationId(); auto it = mDoFSlaveSet.find(dof); if (it == mDoFSlaveSet.end()) { solvable_dof_reorder.insert(IndexIndexPairType(equation_id, counter)); ++counter; } } } // The current process info const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); // Initialize the constant vector double aux_constant_value = 0.0; // Contributions to the system LocalSystemMatrixType transformation_matrix = LocalSystemMatrixType(0, 0); 
LocalSystemVectorType constant_vector = LocalSystemVectorType(0); // Vector containing the localization in the system of the different terms EquationIdVectorType slave_equation_id, master_equation_id; const int number_of_constraints = static_cast<int>(rModelPart.MasterSlaveConstraints().size()); std::unordered_set<IndexType> auxiliar_constant_equations_ids; #pragma omp parallel firstprivate(transformation_matrix, constant_vector, slave_equation_id, master_equation_id) { std::unordered_set<IndexType> auxiliar_temp_constant_equations_ids; auxiliar_temp_constant_equations_ids.reserve(2000); #pragma omp for schedule(guided, 512) for (int i_const = 0; i_const < number_of_constraints; ++i_const) { auto it_const = rModelPart.MasterSlaveConstraints().begin() + i_const; // Detect if the constraint is active or not. If the user did not make any choice the constraint // It is active by default bool constraint_is_active = true; if (it_const->IsDefined(ACTIVE)) constraint_is_active = it_const->Is(ACTIVE); if (constraint_is_active) { it_const->CalculateLocalSystem(transformation_matrix, constant_vector, r_current_process_info); it_const->EquationIdVector(slave_equation_id, master_equation_id, r_current_process_info); // Reassign reordered dofs to the master side for (auto& id : master_equation_id) { id = solvable_dof_reorder[id]; } if (ComputeConstantVector) { for (IndexType i = 0; i < slave_equation_id.size(); ++i) { const IndexType i_global = slave_equation_id[i]; if (i_global < BaseType::mEquationSystemSize) { const double constant_value = constant_vector[i]; if (std::abs(constant_value) > 0.0) { auxiliar_temp_constant_equations_ids.insert(i_global); double& r_value = rConstantVector[i_global]; AtomicAdd(r_value, constant_value); } } } } else { for (IndexType i = 0; i < slave_equation_id.size(); ++i) { const IndexType i_global = slave_equation_id[i]; if (i_global < BaseType::mEquationSystemSize) { const double constant_value = constant_vector[i]; AtomicAdd(aux_constant_value, 
std::abs(constant_value)); } } } if (ComputeTranslationMatrix) { // Assemble the constraint contribution AssembleRelationMatrix(rTMatrix, transformation_matrix, slave_equation_id, master_equation_id); } } } // We merge all the sets in one thread #pragma omp critical { auxiliar_constant_equations_ids.insert(auxiliar_temp_constant_equations_ids.begin(), auxiliar_temp_constant_equations_ids.end()); } } return aux_constant_value > std::numeric_limits<double>::epsilon() ? true : false; KRATOS_CATCH(""); } /** * @brief This method computes the efective constant * @param pScheme The pointer to the integration scheme * @param rModelPart The model part to compute * @param rDxSolved The vector of unkowns actually solved */ void ComputeEffectiveConstant( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemVectorType& rDxSolved ) { if (mComputeConstantContribution) { // We get const TSystemMatrixType& rTMatrix = *mpTMatrix; TSystemVectorType& rConstantVector = *mpConstantVector; TSystemVectorType& rDeltaConstantVector = *mpDeltaConstantVector; TSparseSpace::Copy(rConstantVector, rDeltaConstantVector); // We reconstruct the complete vector of Unknowns TSystemVectorType Dx(BaseType::mEquationSystemSize); TSparseSpace::Mult(rTMatrix, rDxSolved, Dx); // Compute the effective constant vector // Auxiliar initial dof iterator const auto it_dof_begin = BaseType::mDofSet.begin(); TSystemVectorType u(BaseType::mEquationSystemSize); #pragma omp parallel for for (int i = 0; i < static_cast<int>(BaseType::mDofSet.size()); ++i) { auto it_dof = it_dof_begin + i; const IndexType equation_id = it_dof->EquationId(); if (equation_id < BaseType::mEquationSystemSize ) { u[equation_id] = it_dof->GetSolutionStepValue() + Dx[equation_id]; } } TSystemVectorType u_bar(mDoFToSolveSystemSize); IndexType counter = 0; for (IndexType i = 0; i < BaseType::mDofSet.size(); ++i) { auto it_dof = it_dof_begin + i; const IndexType equation_id = it_dof->EquationId(); if (equation_id < 
BaseType::mEquationSystemSize ) { auto it = mDoFSlaveSet.find(*it_dof); if (it == mDoFSlaveSet.end()) { u_bar[counter] = it_dof->GetSolutionStepValue() + rDxSolved[counter]; counter += 1; } } } TSystemVectorType u_bar_complete(BaseType::mEquationSystemSize); TSparseSpace::Mult(rTMatrix, u_bar, u_bar_complete); TSparseSpace::UnaliasedAdd(rDeltaConstantVector, 1.0, u_bar_complete); TSparseSpace::UnaliasedAdd(rDeltaConstantVector, -1.0, u); } } ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /* Class ResidualBasedEliminationBuilderAndSolverWithConstraints */ ///@} ///@name Type Definitions ///@{ ///@} } /* namespace Kratos.*/ #endif /* KRATOS_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER defined */
data_gen.c
/*
 * Copyright (C) 2014-2015, 2018 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 */

#define _XOPEN_SOURCE
#define _BSD_SOURCE

#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#include <ia32intrin.h>

#include "euro_opt.h"

/* Uniform random number in [a, b) drawn from a per-thread reentrant
   drand48 state (seed). */
tfloat RandRange( tfloat a, tfloat b, struct drand48_data *seed )
{
    double r;
    drand48_r(seed, &r);
    return r*(b-a) + a;
}

/*
// This function allocates arrays to hold input and output parameters
// for the Black-Scholes formula.
//     nopt - length of arrays
// Random input parameters
//     s0 - initial price
//     x  - strike price
//     t  - maturity
// Output arrays for call and put prices
//     vcall_compiler, vcall_mkl
//     vput_compiler, vput_mkl
*/
void InitData( int nopt, tfloat* *s0, tfloat* *x, tfloat* *t,
               tfloat* *vcall_compiler, tfloat* *vput_compiler,
               tfloat* *vcall_mkl, tfloat* *vput_mkl )
{
    tfloat *ts0, *tx, *tt, *tvcall_compiler, *tvput_compiler, *tvcall_mkl, *tvput_mkl;
    int i;

    /* Allocate aligned memory (freed with _mm_free in FreeData) */
    ts0             = (tfloat*)_mm_malloc( nopt * sizeof(tfloat), ALIGN_FACTOR);
    tx              = (tfloat*)_mm_malloc( nopt * sizeof(tfloat), ALIGN_FACTOR);
    tt              = (tfloat*)_mm_malloc( nopt * sizeof(tfloat), ALIGN_FACTOR);
    tvcall_compiler = (tfloat*)_mm_malloc( nopt * sizeof(tfloat), ALIGN_FACTOR);
    tvput_compiler  = (tfloat*)_mm_malloc( nopt * sizeof(tfloat), ALIGN_FACTOR);
    tvcall_mkl      = (tfloat*)_mm_malloc( nopt * sizeof(tfloat), ALIGN_FACTOR);
    tvput_mkl       = (tfloat*)_mm_malloc( nopt * sizeof(tfloat), ALIGN_FACTOR);

    if ( (ts0 == NULL) || (tx == NULL) || (tt == NULL) ||
         (tvcall_compiler == NULL) || (tvput_compiler == NULL) ||
         (tvcall_mkl == NULL) || (tvput_mkl == NULL) )
    {
        printf("Memory allocation failure\n");
        exit(-1);
    }

    /* NUMA-friendly data init: each thread first-touches the pages of the
       index range it will later process. Each thread seeds its own
       drand48 state from its thread number, so the stream is per-thread
       deterministic. The omp-for loop variable i is implicitly private. */
    #pragma omp parallel
    {
        struct drand48_data seed;
        srand48_r(omp_get_thread_num()+SEED, &seed);
        #pragma omp for simd
        for ( i = 0; i < nopt; i++ )
        {
            ts0[i] = RandRange( S0L, S0H, &seed );
            tx[i]  = RandRange( XL, XH, &seed );
            tt[i]  = RandRange( TL, TH, &seed );

            tvcall_compiler[i] = 0.0;
            tvput_compiler[i]  = 0.0;
            tvcall_mkl[i] = 0.0;
            tvput_mkl[i]  = 0.0;
        }
    }

    /* Publish the arrays through the output pointers */
    *s0 = ts0;
    *x  = tx;
    *t  = tt;
    *vcall_compiler = tvcall_compiler;
    *vput_compiler  = tvput_compiler;
    *vcall_mkl = tvcall_mkl;
    *vput_mkl  = tvput_mkl;
}

/* Deallocate arrays */
void FreeData( tfloat *s0, tfloat *x, tfloat *t,
               tfloat *vcall_compiler, tfloat *vput_compiler,
               tfloat *vcall_mkl, tfloat *vput_mkl )
{
    /* Free memory (must match the _mm_malloc allocations in InitData) */
    _mm_free(s0);
    _mm_free(x);
    _mm_free(t);
    _mm_free(vcall_compiler);
    _mm_free(vput_compiler);
    _mm_free(vcall_mkl);
    _mm_free(vput_mkl);
}
GB_binop__pair_int32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__pair_int32
// A.*B function (eWiseMult):       GB_AemultB__pair_int32
// A*D function (colscale):         GB_AxD__pair_int32
// D*A function (rowscale):         GB_DxB__pair_int32
// C+=B function (dense accum):     GB_Cdense_accumB__pair_int32
// C+=b function (dense accum):     GB_Cdense_accumb__pair_int32
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__pair_int32
// C=scalar+B                       (none)
// C=scalar+B'                      (none)
// C=A+scalar                       (none)
// C=A'+scalar                      (none)

// C type:   int32_t
// A type:   int32_t
// B,b type: int32_t
// BinaryOp: cij = 1

#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
// (empty for PAIR: operand values are never read)
#define GB_GETA(aij,Ax,pA) \
    ;

// bij = Bx [pB]
// (empty for PAIR: operand values are never read)
#define GB_GETB(bij,Bx,pB) \
    ;

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator: PAIR always produces 1, regardless of x and y
#define GB_BINOP(z, x, y) \
    z = 1 ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_PAIR || GxB_NO_INT32 || GxB_NO_PAIR_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__pair_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__pair_int32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__pair_int32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__pair_int32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__pair_int32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__pair_int32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__pair_int32
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

#if 0

GrB_Info (none)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        ;
        ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

#if 0

GrB_Info (none)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        ;
        ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (x, A'):  transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{                          \
    ; ;                    \
    Cx [pC] = 1 ;          \
}

GrB_Info (none)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
}

#endif

//------------------------------------------------------------------------------
// C = op (A', y):  transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{                          \
    ; ;                    \
    Cx [pC] = 1 ;          \
}

GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
core_strsm.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_ztrsm.c, normal z -> s, Fri Sep 28 17:38:19 2018 * **/ #include <plasma_core_blas.h> #include "plasma_types.h" #include "core_lapack.h" /***************************************************************************//** * * @ingroup core_trsm * * Solves one of the matrix equations * * \f[ op( A )\times X = \alpha B, \f] or * \f[ X \times op( A ) = \alpha B, \f] * * where op( A ) is one of: * \f[ op( A ) = A, \f] * \f[ op( A ) = A^T, \f] * \f[ op( A ) = A^T, \f] * * alpha is a scalar, X and B are m-by-n matrices, and * A is a unit or non-unit, upper or lower triangular matrix. * The matrix X overwrites B. * ******************************************************************************* * * @param[in] side * - PlasmaLeft: op(A)*X = B, * - PlasmaRight: X*op(A) = B. * * @param[in] uplo * - PlasmaUpper: A is upper triangular, * - PlasmaLower: A is lower triangular. * * @param[in] transa * - PlasmaNoTrans: A is not transposed, * - PlasmaTrans: A is transposed, * - PlasmaConjTrans: A is conjugate transposed. * * @param[in] diag * - PlasmaNonUnit: A has non-unit diagonal, * - PlasmaUnit: A has unit diagonal. * * @param[in] m * The number of rows of the matrix B. m >= 0. * * @param[in] n * The number of columns of the matrix B. n >= 0. * * @param[in] alpha * The scalar alpha. * * @param[in] A * The lda-by-ka triangular matrix, * where ka = m if side = PlasmaLeft, * and ka = n if side = PlasmaRight. * If uplo = PlasmaUpper, the leading k-by-k upper triangular part * of the array A contains the upper triangular matrix, and the * strictly lower triangular part of A is not referenced. * If uplo = PlasmaLower, the leading k-by-k lower triangular part * of the array A contains the lower triangular matrix, and the * strictly upper triangular part of A is not referenced. 
* If diag = PlasmaUnit, the diagonal elements of A are also not * referenced and are assumed to be 1. * * @param[in] lda * The leading dimension of the array A. lda >= max(1,k). * * @param[in,out] B * On entry, the ldb-by-n right hand side matrix B. * On exit, if return value = 0, the ldb-by-n solution matrix X. * * @param[in] ldb * The leading dimension of the array B. ldb >= max(1,m). * ******************************************************************************/ __attribute__((weak)) void plasma_core_strsm(plasma_enum_t side, plasma_enum_t uplo, plasma_enum_t transa, plasma_enum_t diag, int m, int n, float alpha, const float *A, int lda, float *B, int ldb) { cblas_strsm(CblasColMajor, (CBLAS_SIDE)side, (CBLAS_UPLO)uplo, (CBLAS_TRANSPOSE)transa, (CBLAS_DIAG)diag, m, n, (alpha), A, lda, B, ldb); } /******************************************************************************/ void plasma_core_omp_strsm( plasma_enum_t side, plasma_enum_t uplo, plasma_enum_t transa, plasma_enum_t diag, int m, int n, float alpha, const float *A, int lda, float *B, int ldb, plasma_sequence_t *sequence, plasma_request_t *request) { int ak; if (side == PlasmaLeft) ak = m; else ak = n; if (sequence->status == PlasmaSuccess) { int side_ = side, uplo_ = uplo; int transa_ = transa, diag_ = diag; int size_A = lda*ak, size_B = ldb*n; #pragma omp target nowait \ depend(in:A[0:size_A]) \ depend(inout:B[0:size_B]) \ firstprivate(side_, uplo_, transa_, diag_) \ firstprivate(m, n, alpha, lda, ldb) \ map(to:A[0:size_A]) \ map(tofrom:B[0:size_B]) { int block_size = 64; plasma_core_strsm(side_, uplo_, transa_, diag_, block_size, block_size, alpha, A, block_size, B, block_size); } } }
matmult_initialize.c
#include "matmult_initialize.h"

/* Fill the rows x cols matrix so that matrix[r][c] == r + c, with the
 * row range divided among the OpenMP thread team. */
void initialize(double **matrix, int rows, int cols)
{
  #pragma omp parallel
  {
    //set_num_threads();
    /*** Initialize matrices ***/
    /* nowait: no barrier needed before the implicit one closing the region */
    #pragma omp for nowait
    for (int row = 0; row < rows; row++) {
      for (int col = 0; col < cols; col++) {
        matrix[row][col] = row + col;
      }
    }
  }
}